diff --git a/.asf.yaml b/.asf.yaml index 8c1a5d51fdf..4d979a18833 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -59,6 +59,7 @@ github: - hsato03 - bernardodemarco - abh1sar + - FelipeM525 protected_branches: ~ diff --git a/.github/linters/.flake8 b/.github/linters/.flake8 index f250719ca19..3364ad14f29 100644 --- a/.github/linters/.flake8 +++ b/.github/linters/.flake8 @@ -22,8 +22,11 @@ # E224 Tab after operator # E227 Missing whitespace around bitwise or shift operator # E242 Tab after ',' +# E271 Multiple spaces after keyword +# E272 Multiple spaces before keyword # E273 Tab after keyword # E274 Tab before keyword +# E713 Test for membership should be 'not in' # E742 Do not define classes named 'I', 'O', or 'l' # E743 Do not define functions named 'I', 'O', or 'l' # E901 SyntaxError or IndentationError @@ -37,4 +40,4 @@ exclude = .git, venv -select = E112,E113,E133,E223,E224,E227,E242,E273,E274,E742,E743,E901,E902,W291,W292,W293,W391 +select = E112,E113,E133,E223,E224,E227,E242,E271,E272,E273,E274,E713,E742,E743,E901,E902,W291,W292,W293,W391 diff --git a/.github/linters/codespell.txt b/.github/linters/codespell.txt new file mode 100644 index 00000000000..37b3e6de1cb --- /dev/null +++ b/.github/linters/codespell.txt @@ -0,0 +1,526 @@ +accouns +acheived +acount +actuall +acuiring +acumulate +addreess +addtion +adminstrator +afer +afrer +afterall +againt +ags +aktive +algoritm +allo +alloacate +allocted +alocation +alogrithm +alpha-numeric +alue +ammended +ammount +ans +anull +apche +aplication +apllication +applicatio +apporpriate +appropritate +aqcuire +aqcuired +aquire +aquiring +assiciate +assigne +assoication +assosiate +asssert +astroid +asynchroniously +asyncronous +atleast +atomation +attache +attch +attches +authenciation +authenitcation +authenitication +availiability +avialable +bais +beacause +beacuse +becase +becasue +becaues +behviour +birdge +bject +boardcast +bootstraper +bu +cant +capabilites +capablity +capcity +carrefully +cavaet +chaing +checkd 
+childs +choosen +chould +clenup +cliente +clinet +cluser +cna +collison +comman +commited +comparision +comparisions +complient +concious +conectix +confg +configruation +configuable +conneciton +connexion +constrait +constraits +containg +contex +continuesly +contro +controler +controles +controll +convienient +convinience +coputer +correcponding +correspoding +correspoonds +cosole +coudl +couldnt +craete +craeted +crate +crated +createa +createing +credentail +cros +crresponding +curren +currentl +datas +decalared +declatory +decocdes +decypher +defalut +defaut +defered +definiton +deleteable +dependancy +dependant +dependend +deployement +deply +deplying +dervied +descktop +descrption +deserialzed +desination +detination +detroy +detroying +dettach +dettached +dettaching +diabling +diasbled +dictonary +didnt +differnet +differnt +direcotry +directroy +disale +disbale +discrepency +disover +dissapper +dissassociated +divice +doesn' +doesnot +doesnt +dont' +doubleclick +dows +eanbled +earch +ect +elemnt +eles +elments +emmited +enble +encryted +enebled +enmpty +entires +enviornment +environmnet +equivalant +erro +erronous +everthing +everytime +excetion +excption +excute +execept +execption +execut +executeable +exeeded +exisitng +exisits +existin +existsing +exitting +expcted +expection +explaination +explicitely +faield +faild +failes +falied +fasion +feild +filenname +fillled +findout +fisrt +fo +folowing +fowarding +frist +fro +frontent +fuctionality +genarate +generallly +gernerate +get's +gloabal +gorry +gracefull +gradiant +handeling +hanling +happend +hasing +hasnt +hda +hostanme +hould +hsould +hte +identifers +identifyer +identifyers +igoring +immediatley +implememented +implementor +implementors +implemnt +implict +implmeneted +implmentation +incase +includeing +incosistency +indecates +indien +infor +informations +informaton +infrastrcuture +ingore +inital +initalize +initator +initilization +inspite +instace +instal +instnace +intefaces +intepret 
+intereface +interfer +interpretted +intialize +intializes +intializing +invlaid +invokation +isnt +ist +klunky +lable +leve +lief +limite +linke +listner +lokal +lokales +maintainence +maintenace +maintenence +mamagement +mambers +manaully +manuel +maxium +mehtod +mergable +mesage +messge +metatdata +milisecond +minumum +mis +modifers +mor +mot +mulitply +multipl +multple +mutliple +nast +nd +neccessary +necesary +netowrk +nin +nodel +nome +noone +nowe +numbe +numer +occured +occurence +occuring +offfering +ofthe +omited +onother +opeation +optin +orginal +otherwse +outter +overriden +overwritting +paramater +paramemeter +paramenter +paramete +parametrs +pararmeter +parms +parralels +particualr +passowrd +perfromed +permissble +physcial +plugable +pluging +polcies +policys +poluting +possiblity +potenial +prefered +preffered +pressenter +previleges +primay +priviledged +procuct +programatically +progres +properites +propertie +propertys +propogate +provison +psudo +pyhsical +readabilty +readd +reccuring +recevied +recieved +recursivelly +redunant +refference +releease +relese +remaning +remore +remvoing +renabling +repeatly +reponse +reqest +reqiured +requieres +requried +reserv +reserverd +reseted +reseting +resorce +responser +resposne +resturns +retreive +retreiving +retrive +retrived +retriving +retrun +retuned +returing +re-use +rever +rocessor +runing +runnign +sate +scalled +scipt +scirpt +scrip +seconadry +seconday +seesion +sepcified +sepcify +seprated +ser +servies +seting +settig +sevices +shoul +shoule +sie +signle +simplier +singature +skiping +snaphsot +snpashot +specied +specifed +specifiy +splitted +spped +standy +statics +stickyness +stil +stip +storeage +strat +streched +strutural +succesfull +successfull +suceessful +suces +sucessfully +suiteable +suppots +suppport +syncronous +syste +tage +te +tempdate +testng +tha +thats +ther +therefor +theres +theses +thi +thorugh +throught +ths +tipically +transction +tring +trough +tyoe +ue +ues 
+unavailibility +uncommited +uncompressible +uneccessarily +unexepected +unexpect +unknow +unkonw +unkown +unneccessary +unparseable +unrecoginized +unsupport +unxpected +updat +uptodate +usera +usign +usin +utlization +vaidate +valiate +valule +valus +varibles +verfy +verfying +verifing +virutal +visable +wakup +wil +wit +wll +wth diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1c6c90a6183..fd3c8f8ac67 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -43,7 +43,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.8' + python-version: '3.10' architecture: 'x64' - name: Install Build Dependencies diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fac2d6266fa..ceffb42c79b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,6 +35,7 @@ jobs: fail-fast: false matrix: tests: [ "smoke/test_accounts + smoke/test_account_access smoke/test_affinity_groups smoke/test_affinity_groups_projects smoke/test_annotations @@ -86,7 +87,9 @@ jobs: smoke/test_migration smoke/test_multipleips_per_nic smoke/test_nested_virtualization - smoke/test_set_sourcenat", + smoke/test_set_sourcenat + smoke/test_webhook_lifecycle + smoke/test_purge_expunged_vms", "smoke/test_network smoke/test_network_acl smoke/test_network_ipv6 @@ -132,6 +135,7 @@ jobs: smoke/test_usage smoke/test_usage_events smoke/test_vm_deployment_planner + smoke/test_vm_strict_host_tags smoke/test_vm_schedule smoke/test_vm_life_cycle smoke/test_vm_lifecycle_unmanage_import @@ -222,7 +226,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.8' + python-version: '3.10' architecture: 'x64' - name: Install Build Dependencies @@ -280,7 +284,7 @@ jobs: - name: Start CloudStack Management Server with Simulator run: | - export MAVEN_OPTS="-Xmx4096m -XX:MaxPermSize=800m -Djava.security.egd=file:/dev/urandom 
-javaagent:jacoco/lib/jacocoagent.jar=address=*,port=36320,output=tcpserver" + export MAVEN_OPTS="-Xmx4096m -XX:MaxMetaspaceSize=800m -Djava.security.egd=file:/dev/urandom -javaagent:jacoco/lib/jacocoagent.jar=address=*,port=36320,output=tcpserver --add-opens=java.base/java.lang=ALL-UNNAMED --add-exports=java.base/sun.security.x509=ALL-UNNAMED --add-opens=java.base/jdk.internal.reflect=ALL-UNNAMED" echo -e "\nStarting simulator" set +e mvn -Dsimulator -Dorg.eclipse.jetty.annotations.maxWait=120 -pl :cloud-client-ui jetty:run 2>&1 > /tmp/jetty-log || true & diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index a8f923d708d..c77783746ca 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -36,11 +36,11 @@ jobs: with: fetch-depth: 0 - - name: Set up JDK11 + - name: Set up JDK 17 uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: '11' + java-version: '17' cache: 'maven' - name: Build CloudStack with Quality Checks diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 784df0cf03c..b6c814a36f4 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -39,7 +39,7 @@ jobs: pip install pre-commit - name: Set PY run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cache/pre-commit key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }} diff --git a/.github/workflows/main-sonar-check.yml b/.github/workflows/main-sonar-check.yml index 66bb1093e04..8248e48022a 100644 --- a/.github/workflows/main-sonar-check.yml +++ b/.github/workflows/main-sonar-check.yml @@ -36,22 +36,22 @@ jobs: with: fetch-depth: 0 - - name: Set up JDK11 + - name: Set up JDK17 uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: '11' + java-version: '17' cache: 'maven' - name: Cache SonarCloud packages - uses: actions/cache@v3 + uses: actions/cache@v4 with: 
path: ~/.sonar/cache key: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar - name: Cache local Maven repository - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.m2/repository key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} diff --git a/.github/workflows/rat.yml b/.github/workflows/rat.yml index b8f83de8194..52ce343841b 100644 --- a/.github/workflows/rat.yml +++ b/.github/workflows/rat.yml @@ -31,10 +31,10 @@ jobs: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - - name: Set up JDK 11 + - name: Set up JDK 17 uses: actions/setup-java@v4 with: - java-version: '11' + java-version: '17' distribution: 'adopt' architecture: x64 cache: maven diff --git a/.github/workflows/sonar-check.yml b/.github/workflows/sonar-check.yml index 2ebcf1fb2db..c36bceb2b90 100644 --- a/.github/workflows/sonar-check.yml +++ b/.github/workflows/sonar-check.yml @@ -38,22 +38,22 @@ jobs: ref: "refs/pull/${{ github.event.number }}/merge" fetch-depth: 0 - - name: Set up JDK11 + - name: Set up JDK17 uses: actions/setup-java@v4 with: distribution: 'temurin' - java-version: '11' + java-version: '17' cache: 'maven' - name: Cache SonarCloud packages - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.sonar/cache key: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar - name: Cache local Maven repository - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.m2/repository key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} diff --git a/.github/workflows/ui.yml b/.github/workflows/ui.yml index 476526aff32..56f757133b7 100644 --- a/.github/workflows/ui.yml +++ b/.github/workflows/ui.yml @@ -36,7 +36,7 @@ jobs: - name: Set up Node uses: actions/setup-node@v3 with: - node-version: 14 + node-version: 16 - name: Env details run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 51ad34f2dbe..e8f3c6093a9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -59,8 +59,17 @@ repos: - id: 
mixed-line-ending exclude: \.(cs|xml)$ - id: trailing-whitespace - files: \.(in|java|md|py|rb|sh|vue|yaml|yml)$ + files: \.(header|in|java|md|properties|py|rb|sh|sql|txt|vue|xml|yaml|yml)$ args: [--markdown-linebreak-ext=md] + exclude: ^services/console-proxy/rdpconsole/src/test/doc/freerdp-debug-log\.txt$ + - repo: https://github.com/codespell-project/codespell + rev: v2.2.6 + hooks: + - id: codespell + name: run codespell + description: Check spelling with codespell + args: [--ignore-words=.github/linters/codespell.txt] + exclude: ^ui/package\.json$|^ui/package-lock\.json$|^ui/public/js/less\.min\.js$|^ui/public/locales/.*[^n].*\.json$ - repo: https://github.com/pycqa/flake8 rev: 7.0.0 hooks: diff --git a/INSTALL.md b/INSTALL.md index 6586e4e57fc..e133e7d7b91 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -78,7 +78,7 @@ Clear old database (if any) and deploy the database schema: Export the following variable if you need to run and debug the management server: - $ export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=500m -Xdebug -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n" + $ export MAVEN_OPTS="-Xmx1024m -XX:MaxMetaspaceSize=500m -Xdebug -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n" Start the management server: diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index 8293a22973a..e02cc651853 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -23,6 +23,7 @@ This PR... 
- [ ] Enhancement (improves an existing feature and functionality) - [ ] Cleanup (Code refactoring and cleanup, that may add test cases) - [ ] build/CI +- [ ] test (unit or integration test code) ### Feature/Enhancement Scale or Bug Severity diff --git a/agent/conf/cloudstack-agent.logrotate.in b/agent/conf/cloudstack-agent.logrotate.in index 2b3dc87f253..9f22b4bab86 100644 --- a/agent/conf/cloudstack-agent.logrotate.in +++ b/agent/conf/cloudstack-agent.logrotate.in @@ -15,11 +15,13 @@ # specific language governing permissions and limitations # under the License. -/var/log/cloudstack/agent/security_group.log /var/log/cloudstack/agent/resizevolume.log /var/log/cloudstack/agent/rolling-maintenance.log { +/var/log/cloudstack/agent/security_group.log /var/log/cloudstack/agent/resizevolume.log /var/log/cloudstack/agent/rolling-maintenance.log /var/log/cloudstack/agent/agent.out /var/log/cloudstack/agent/agent.err { copytruncate daily rotate 5 compress missingok size 10M + dateext + dateformat -%Y-%m-%d } diff --git a/agent/conf/log4j-cloud.xml.in b/agent/conf/log4j-cloud.xml.in index 44ebd1358af..84957edca03 100644 --- a/agent/conf/log4j-cloud.xml.in +++ b/agent/conf/log4j-cloud.xml.in @@ -30,7 +30,7 @@ under the License. - + @@ -38,7 +38,7 @@ under the License. 
- + diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 56732dad993..15f010808ac 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -504,6 +504,13 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater startup.setGuid(getResourceGuid()); startup.setResourceName(getResourceName()); startup.setVersion(getVersion()); + startup.setArch(getAgentArch()); + } + + protected String getAgentArch() { + final Script command = new Script("/usr/bin/arch", 500, logger); + final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + return command.execute(parser); } @Override @@ -858,11 +865,21 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater setId(ready.getHostId()); } + verifyAgentArch(ready.getArch()); processManagementServerList(ready.getMsHostList(), ready.getLbAlgorithm(), ready.getLbCheckInterval()); logger.info("Ready command is processed for agent id = {}", getId()); } + private void verifyAgentArch(String arch) { + if (StringUtils.isNotBlank(arch)) { + String agentArch = getAgentArch(); + if (!arch.equals(agentArch)) { + logger.error("Unexpected arch {}, expected {}", agentArch, arch); + } + } + } + public void processOtherTask(final Task task) { final Object obj = task.get(); if (obj instanceof Response) { @@ -1127,6 +1144,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater logger.error("Error parsing task", e); } } else if (task.getType() == Task.Type.DISCONNECT) { + try { + // an issue has been found if reconnect immediately after disconnecting. 
please refer to https://github.com/apache/cloudstack/issues/8517 + // wait 5 seconds before reconnecting + Thread.sleep(5000); + } catch (InterruptedException e) { + } reconnect(task.getLink()); return; } else if (task.getType() == Task.Type.OTHER) { diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index b27ba651e4f..8f97edc3935 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -751,7 +751,7 @@ public class AgentProperties{ public static final Property IOTHREADS = new Property<>("iothreads", 1); /** - * Enable verbose mode for virt-v2v Instance Conversion from Vmware to KVM + * Enable verbose mode for virt-v2v Instance Conversion from VMware to KVM * Data type: Boolean.
* Default value: false */ diff --git a/agent/src/test/java/com/cloud/agent/AgentShellTest.java b/agent/src/test/java/com/cloud/agent/AgentShellTest.java index f7151779f58..4126692546f 100644 --- a/agent/src/test/java/com/cloud/agent/AgentShellTest.java +++ b/agent/src/test/java/com/cloud/agent/AgentShellTest.java @@ -350,4 +350,16 @@ public class AgentShellTest { Mockito.verify(agentShellSpy).setHosts(expected); } + + @Test + public void updateAndGetConnectedHost() { + String expected = "test"; + + AgentShell shell = new AgentShell(); + shell.setHosts("test"); + shell.getNextHost(); + shell.updateConnectedHost(); + + Assert.assertEquals(expected, shell.getConnectedHost()); + } } diff --git a/api/src/main/java/com/cloud/agent/api/to/BucketTO.java b/api/src/main/java/com/cloud/agent/api/to/BucketTO.java new file mode 100644 index 00000000000..f7e4bfea80f --- /dev/null +++ b/api/src/main/java/com/cloud/agent/api/to/BucketTO.java @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.to; + +import org.apache.cloudstack.storage.object.Bucket; + +public final class BucketTO { + + private String name; + + private String accessKey; + + private String secretKey; + + public BucketTO(Bucket bucket) { + this.name = bucket.getName(); + this.accessKey = bucket.getAccessKey(); + this.secretKey = bucket.getSecretKey(); + } + + public BucketTO(String name) { + this.name = name; + } + + public String getName() { + return this.name; + } + + public String getAccessKey() { + return this.accessKey; + } + + public String getSecretKey() { + return this.secretKey; + } +} diff --git a/api/src/main/java/com/cloud/agent/api/to/FirewallRuleTO.java b/api/src/main/java/com/cloud/agent/api/to/FirewallRuleTO.java index d08884d1cbe..25c75001a3c 100644 --- a/api/src/main/java/com/cloud/agent/api/to/FirewallRuleTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/FirewallRuleTO.java @@ -155,9 +155,7 @@ public class FirewallRuleTO implements InternalIdentity { rule.getIcmpType(), rule.getIcmpCode()); this.trafficType = trafficType; - if (FirewallRule.Purpose.Ipv6Firewall.equals(purpose)) { - this.destCidrList = rule.getDestinationCidrList(); - } + this.destCidrList = rule.getDestinationCidrList(); } public FirewallRuleTO(FirewallRule rule, String srcVlanTag, String srcIp, FirewallRule.Purpose purpose, FirewallRule.TrafficType trafficType, diff --git a/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java b/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java index 6e7aa8b21e2..d86eb2a3a7f 100644 --- a/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/RemoteInstanceTO.java @@ -18,40 +18,39 @@ */ package com.cloud.agent.api.to; +import java.io.Serializable; + import com.cloud.agent.api.LogLevel; import com.cloud.hypervisor.Hypervisor; -import java.io.Serializable; - public class RemoteInstanceTO implements Serializable { private Hypervisor.HypervisorType hypervisorType; 
- private String hostName; private String instanceName; - // Vmware Remote Instances parameters + // VMware Remote Instances parameters (required for exporting OVA through ovftool) // TODO: cloud.agent.transport.Request#getCommands() cannot handle gsoc decode for polymorphic classes private String vcenterUsername; @LogLevel(LogLevel.Log4jLevel.Off) private String vcenterPassword; private String vcenterHost; private String datacenterName; - private String clusterName; public RemoteInstanceTO() { } - public RemoteInstanceTO(String hostName, String instanceName, String vcenterHost, - String datacenterName, String clusterName, - String vcenterUsername, String vcenterPassword) { + public RemoteInstanceTO(String instanceName) { + this.hypervisorType = Hypervisor.HypervisorType.VMware; + this.instanceName = instanceName; + } + + public RemoteInstanceTO(String instanceName, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) { this.hypervisorType = Hypervisor.HypervisorType.VMware; - this.hostName = hostName; this.instanceName = instanceName; this.vcenterHost = vcenterHost; - this.datacenterName = datacenterName; - this.clusterName = clusterName; this.vcenterUsername = vcenterUsername; this.vcenterPassword = vcenterPassword; + this.datacenterName = datacenterName; } public Hypervisor.HypervisorType getHypervisorType() { @@ -62,10 +61,6 @@ public class RemoteInstanceTO implements Serializable { return this.instanceName; } - public String getHostName() { - return this.hostName; - } - public String getVcenterUsername() { return vcenterUsername; } @@ -81,8 +76,4 @@ public class RemoteInstanceTO implements Serializable { public String getDatacenterName() { return datacenterName; } - - public String getClusterName() { - return clusterName; - } } diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java index b4f4619be9a..6f24b1cd6ca 100644 --- 
a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java @@ -84,6 +84,8 @@ public class VirtualMachineTO { Map extraConfig = new HashMap<>(); Map networkIdToNetworkNameMap = new HashMap<>(); DeployAsIsInfoTO deployAsIsInfo; + String metadataManufacturer; + String metadataProductName; public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) { @@ -429,6 +431,22 @@ public class VirtualMachineTO { this.deployAsIsInfo = deployAsIsInfo; } + public String getMetadataManufacturer() { + return metadataManufacturer; + } + + public void setMetadataManufacturer(String metadataManufacturer) { + this.metadataManufacturer = metadataManufacturer; + } + + public String getMetadataProductName() { + return metadataProductName; + } + + public void setMetadataProductName(String metadataProductName) { + this.metadataProductName = metadataProductName; + } + @Override public String toString() { return String.format("VM {id: \"%s\", name: \"%s\", uuid: \"%s\", type: \"%s\"}", id, name, uuid, type); diff --git a/api/src/main/java/com/cloud/bgp/ASNumber.java b/api/src/main/java/com/cloud/bgp/ASNumber.java new file mode 100644 index 00000000000..b0e5394df75 --- /dev/null +++ b/api/src/main/java/com/cloud/bgp/ASNumber.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.bgp; + +import org.apache.cloudstack.acl.InfrastructureEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +import java.util.Date; + +public interface ASNumber extends InfrastructureEntity, InternalIdentity, Identity { + + Long getAccountId(); + Long getDomainId(); + long getAsNumber(); + long getAsNumberRangeId(); + long getDataCenterId(); + Date getAllocatedTime(); + boolean isAllocated(); + Long getNetworkId(); + Long getVpcId(); + Date getCreated(); + Date getRemoved(); +} diff --git a/api/src/main/java/com/cloud/bgp/ASNumberRange.java b/api/src/main/java/com/cloud/bgp/ASNumberRange.java new file mode 100644 index 00000000000..ae877ee60db --- /dev/null +++ b/api/src/main/java/com/cloud/bgp/ASNumberRange.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.bgp; + +import org.apache.cloudstack.acl.InfrastructureEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +import java.util.Date; + +public interface ASNumberRange extends InfrastructureEntity, InternalIdentity, Identity { + + long getStartASNumber(); + long getEndASNumber(); + long getDataCenterId(); + Date getCreated(); +} diff --git a/api/src/main/java/com/cloud/bgp/BGPService.java b/api/src/main/java/com/cloud/bgp/BGPService.java new file mode 100644 index 00000000000..935237092dd --- /dev/null +++ b/api/src/main/java/com/cloud/bgp/BGPService.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.bgp; + +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.network.vpc.Vpc; +import com.cloud.utils.Pair; +import org.apache.cloudstack.api.command.user.bgp.ListASNumbersCmd; + +import java.util.List; + +public interface BGPService { + + ASNumberRange createASNumberRange(long zoneId, long startASNumber, long endASNumber); + List listASNumberRanges(Long zoneId); + Pair, Integer> listASNumbers(ListASNumbersCmd cmd); + boolean allocateASNumber(long zoneId, Long asNumber, Long networkId, Long vpcId); + Pair releaseASNumber(long zoneId, long asNumber, boolean isReleaseNetworkDestroy); + boolean deleteASRange(long id); + + boolean applyBgpPeers(Network network, boolean continueOnError) throws ResourceUnavailableException; + + boolean applyBgpPeers(Vpc vpc, boolean continueOnError) throws ResourceUnavailableException; +} diff --git a/api/src/main/java/com/cloud/cpu/CPU.java b/api/src/main/java/com/cloud/cpu/CPU.java new file mode 100644 index 00000000000..4e1b9f5a501 --- /dev/null +++ b/api/src/main/java/com/cloud/cpu/CPU.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.cpu; + +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.commons.lang3.StringUtils; + +import java.util.LinkedHashMap; +import java.util.Map; + +public class CPU { + + public static final String archX86Identifier = "i686"; + public static final String archX86_64Identifier = "x86_64"; + public static final String archARM64Identifier = "aarch64"; + + public static class CPUArch { + private static final Map cpuArchMap = new LinkedHashMap<>(); + + public static final CPUArch archX86 = new CPUArch(archX86Identifier, 32); + public static final CPUArch amd64 = new CPUArch(archX86_64Identifier, 64); + public static final CPUArch arm64 = new CPUArch(archARM64Identifier, 64); + + private String type; + private int bits; + + public CPUArch(String type, int bits) { + this.type = type; + this.bits = bits; + cpuArchMap.put(type, this); + } + + public String getType() { + return this.type; + } + + public int getBits() { + return this.bits; + } + + public static CPUArch fromType(String type) { + if (StringUtils.isBlank(type)) { + return amd64; + } + switch (type) { + case archX86Identifier: return archX86; + case archX86_64Identifier: return amd64; + case archARM64Identifier: return arm64; + default: throw new CloudRuntimeException(String.format("Unsupported arch type: %s", type)); + } + } + } +} diff --git a/api/src/main/java/com/cloud/dc/DedicatedResources.java b/api/src/main/java/com/cloud/dc/DedicatedResources.java index 63188ca0b0e..23e6cc88a1e 100644 --- a/api/src/main/java/com/cloud/dc/DedicatedResources.java +++ b/api/src/main/java/com/cloud/dc/DedicatedResources.java @@ -21,6 +21,10 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; public interface DedicatedResources extends InfrastructureEntity, InternalIdentity, Identity { + enum Type { + Zone, Pod, Cluster, Host + } + @Override long getId(); diff --git a/api/src/main/java/com/cloud/event/EventTypes.java 
b/api/src/main/java/com/cloud/event/EventTypes.java index 01ad12a71e0..5e5309965c1 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -28,10 +28,14 @@ import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.config.Configuration; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; import org.apache.cloudstack.ha.HAConfig; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; +import org.apache.cloudstack.quota.QuotaTariff; +import org.apache.cloudstack.storage.sharedfs.SharedFS; import org.apache.cloudstack.storage.object.Bucket; import org.apache.cloudstack.storage.object.ObjectStore; -import org.apache.cloudstack.quota.QuotaTariff; import org.apache.cloudstack.usage.Usage; import org.apache.cloudstack.vm.schedule.VMSchedule; @@ -242,6 +246,8 @@ public class EventTypes { public static final String EVENT_ROLE_UPDATE = "ROLE.UPDATE"; public static final String EVENT_ROLE_DELETE = "ROLE.DELETE"; public static final String EVENT_ROLE_IMPORT = "ROLE.IMPORT"; + public static final String EVENT_ROLE_ENABLE = "ROLE.ENABLE"; + public static final String EVENT_ROLE_DISABLE = "ROLE.DISABLE"; public static final String EVENT_ROLE_PERMISSION_CREATE = "ROLE.PERMISSION.CREATE"; public static final String EVENT_ROLE_PERMISSION_UPDATE = "ROLE.PERMISSION.UPDATE"; public static final String EVENT_ROLE_PERMISSION_DELETE = "ROLE.PERMISSION.DELETE"; @@ -333,6 +339,7 @@ public class EventTypes { public static final String EVENT_SNAPSHOT_OFF_PRIMARY = "SNAPSHOT.OFF_PRIMARY"; public static final String EVENT_SNAPSHOT_DELETE = "SNAPSHOT.DELETE"; public static final String EVENT_SNAPSHOT_REVERT = "SNAPSHOT.REVERT"; + public static final String EVENT_SNAPSHOT_EXTRACT = "SNAPSHOT.EXTRACT"; public static final 
String EVENT_SNAPSHOT_POLICY_CREATE = "SNAPSHOTPOLICY.CREATE"; public static final String EVENT_SNAPSHOT_POLICY_UPDATE = "SNAPSHOTPOLICY.UPDATE"; public static final String EVENT_SNAPSHOT_POLICY_DELETE = "SNAPSHOTPOLICY.DELETE"; @@ -390,6 +397,11 @@ public class EventTypes { public static final String EVENT_VLAN_IP_RANGE_RELEASE = "VLAN.IP.RANGE.RELEASE"; public static final String EVENT_VLAN_IP_RANGE_UPDATE = "VLAN.IP.RANGE.UPDATE"; + // AS Number + public static final String EVENT_AS_RANGE_CREATE = "AS.RANGE.CREATE"; + public static final String EVENT_AS_RANGE_DELETE = "AS.RANGE.DELETE"; + public static final String EVENT_AS_NUMBER_RELEASE = "AS.NUMBER.RELEASE"; + public static final String EVENT_MANAGEMENT_IP_RANGE_CREATE = "MANAGEMENT.IP.RANGE.CREATE"; public static final String EVENT_MANAGEMENT_IP_RANGE_DELETE = "MANAGEMENT.IP.RANGE.DELETE"; public static final String EVENT_MANAGEMENT_IP_RANGE_UPDATE = "MANAGEMENT.IP.RANGE.UPDATE"; @@ -448,9 +460,11 @@ public class EventTypes { public static final String EVENT_MAINTENANCE_PREPARE_PRIMARY_STORAGE = "MAINT.PREPARE.PS"; // Primary storage pool + public static final String EVENT_UPDATE_PRIMARY_STORAGE = "UPDATE.PS"; public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS"; public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS"; public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL"; + public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE"; // VPN public static final String EVENT_REMOTE_ACCESS_VPN_CREATE = "VPN.REMOTE.ACCESS.CREATE"; @@ -721,6 +735,8 @@ public class EventTypes { // SystemVM public static final String EVENT_LIVE_PATCH_SYSTEMVM = "LIVE.PATCH.SYSTEM.VM"; + //Purge resources + public static final String EVENT_PURGE_EXPUNGED_RESOURCES = "PURGE.EXPUNGED.RESOURCES"; // OBJECT STORE public static final String EVENT_OBJECT_STORE_CREATE = "OBJECT.STORE.CREATE"; @@ -737,6 +753,37 @@ public class EventTypes { public static final 
String EVENT_QUOTA_TARIFF_DELETE = "QUOTA.TARIFF.DELETE"; public static final String EVENT_QUOTA_TARIFF_UPDATE = "QUOTA.TARIFF.UPDATE"; + // Routing + public static final String EVENT_ZONE_IP4_SUBNET_CREATE = "ZONE.IP4.SUBNET.CREATE"; + public static final String EVENT_ZONE_IP4_SUBNET_UPDATE = "ZONE.IP4.SUBNET.UPDATE"; + public static final String EVENT_ZONE_IP4_SUBNET_DELETE = "ZONE.IP4.SUBNET.DELETE"; + public static final String EVENT_ZONE_IP4_SUBNET_DEDICATE = "ZONE.IP4.SUBNET.DEDICATE"; + public static final String EVENT_ZONE_IP4_SUBNET_RELEASE = "ZONE.IP4.SUBNET.RELEASE"; + public static final String EVENT_IP4_GUEST_SUBNET_CREATE = "IP4.GUEST.SUBNET.CREATE"; + public static final String EVENT_IP4_GUEST_SUBNET_DELETE = "IP4.GUEST.SUBNET.DELETE"; + public static final String EVENT_ROUTING_IPV4_FIREWALL_RULE_CREATE = "ROUTING.IPV4.FIREWALL.RULE.CREATE"; + public static final String EVENT_ROUTING_IPV4_FIREWALL_RULE_UPDATE = "ROUTING.IPV4.FIREWALL.RULE.UPDATE"; + public static final String EVENT_ROUTING_IPV4_FIREWALL_RULE_DELETE = "ROUTING.IPV4.FIREWALL.RULE.DELETE"; + public static final String EVENT_BGP_PEER_CREATE = "BGP.PEER.CREATE"; + public static final String EVENT_BGP_PEER_UPDATE = "BGP.PEER.UPDATE"; + public static final String EVENT_BGP_PEER_DELETE = "BGP.PEER.DELETE"; + public static final String EVENT_BGP_PEER_DEDICATE = "BGP.PEER.DEDICATE"; + public static final String EVENT_BGP_PEER_RELEASE = "BGP.PEER.RELEASE"; + public static final String EVENT_NETWORK_BGP_PEER_UPDATE = "NETWORK.BGP.PEER.UPDATE"; + public static final String EVENT_VPC_BGP_PEER_UPDATE = "VPC.BGP.PEER.UPDATE"; + + // SharedFS + public static final String EVENT_SHAREDFS_CREATE = "SHAREDFS.CREATE"; + public static final String EVENT_SHAREDFS_START = "SHAREDFS.START"; + public static final String EVENT_SHAREDFS_UPDATE = "SHAREDFS.UPDATE"; + public static final String EVENT_SHAREDFS_CHANGE_SERVICE_OFFERING = "SHAREDFS.CHANGE.SERVICE.OFFERING"; + public static final String 
EVENT_SHAREDFS_CHANGE_DISK_OFFERING = "SHAREDFS.CHANGE.DISK.OFFERING"; + public static final String EVENT_SHAREDFS_STOP = "SHAREDFS.STOP"; + public static final String EVENT_SHAREDFS_RESTART = "SHAREDFS.RESTART"; + public static final String EVENT_SHAREDFS_DESTROY = "SHAREDFS.DESTROY"; + public static final String EVENT_SHAREDFS_EXPUNGE = "SHAREDFS.EXPUNGE"; + public static final String EVENT_SHAREDFS_RECOVER = "SHAREDFS.RECOVER"; + static { // TODO: need a way to force author adding event types to declare the entity details as well, with out braking @@ -838,6 +885,8 @@ public class EventTypes { entityEventDetails.put(EVENT_ROLE_UPDATE, Role.class); entityEventDetails.put(EVENT_ROLE_DELETE, Role.class); entityEventDetails.put(EVENT_ROLE_IMPORT, Role.class); + entityEventDetails.put(EVENT_ROLE_ENABLE, Role.class); + entityEventDetails.put(EVENT_ROLE_DISABLE, Role.class); entityEventDetails.put(EVENT_ROLE_PERMISSION_CREATE, RolePermission.class); entityEventDetails.put(EVENT_ROLE_PERMISSION_UPDATE, RolePermission.class); entityEventDetails.put(EVENT_ROLE_PERMISSION_DELETE, RolePermission.class); @@ -894,6 +943,7 @@ public class EventTypes { // Snapshots entityEventDetails.put(EVENT_SNAPSHOT_CREATE, Snapshot.class); entityEventDetails.put(EVENT_SNAPSHOT_DELETE, Snapshot.class); + entityEventDetails.put(EVENT_SNAPSHOT_EXTRACT, Snapshot.class); entityEventDetails.put(EVENT_SNAPSHOT_ON_PRIMARY, Snapshot.class); entityEventDetails.put(EVENT_SNAPSHOT_OFF_PRIMARY, Snapshot.class); entityEventDetails.put(EVENT_SNAPSHOT_POLICY_CREATE, SnapshotPolicy.class); @@ -998,8 +1048,10 @@ public class EventTypes { entityEventDetails.put(EVENT_MAINTENANCE_PREPARE_PRIMARY_STORAGE, Host.class); // Primary storage pool + entityEventDetails.put(EVENT_UPDATE_PRIMARY_STORAGE, StoragePool.class); entityEventDetails.put(EVENT_ENABLE_PRIMARY_STORAGE, StoragePool.class); entityEventDetails.put(EVENT_DISABLE_PRIMARY_STORAGE, StoragePool.class); + 
entityEventDetails.put(EVENT_CHANGE_STORAGE_POOL_SCOPE, StoragePool.class); // VPN entityEventDetails.put(EVENT_REMOTE_ACCESS_VPN_CREATE, RemoteAccessVpn.class); @@ -1191,6 +1243,35 @@ public class EventTypes { entityEventDetails.put(EVENT_QUOTA_TARIFF_CREATE, QuotaTariff.class); entityEventDetails.put(EVENT_QUOTA_TARIFF_DELETE, QuotaTariff.class); entityEventDetails.put(EVENT_QUOTA_TARIFF_UPDATE, QuotaTariff.class); + + // Routing + entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_CREATE, DataCenterIpv4GuestSubnet.class); + entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_UPDATE, DataCenterIpv4GuestSubnet.class); + entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_DELETE, DataCenterIpv4GuestSubnet.class); + entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_DEDICATE, DataCenterIpv4GuestSubnet.class); + entityEventDetails.put(EVENT_ZONE_IP4_SUBNET_RELEASE, DataCenterIpv4GuestSubnet.class); + entityEventDetails.put(EVENT_IP4_GUEST_SUBNET_CREATE, Ipv4GuestSubnetNetworkMap.class); + entityEventDetails.put(EVENT_IP4_GUEST_SUBNET_DELETE, Ipv4GuestSubnetNetworkMap.class); + entityEventDetails.put(EVENT_ROUTING_IPV4_FIREWALL_RULE_CREATE, FirewallRule.class); + entityEventDetails.put(EVENT_ROUTING_IPV4_FIREWALL_RULE_UPDATE, FirewallRule.class); + entityEventDetails.put(EVENT_ROUTING_IPV4_FIREWALL_RULE_DELETE, FirewallRule.class); + entityEventDetails.put(EVENT_BGP_PEER_CREATE, BgpPeer.class); + entityEventDetails.put(EVENT_BGP_PEER_UPDATE, BgpPeer.class); + entityEventDetails.put(EVENT_BGP_PEER_DELETE, BgpPeer.class); + entityEventDetails.put(EVENT_BGP_PEER_DEDICATE, BgpPeer.class); + entityEventDetails.put(EVENT_BGP_PEER_RELEASE, BgpPeer.class); + + // SharedFS + entityEventDetails.put(EVENT_SHAREDFS_CREATE, SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_START, SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_STOP, SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_UPDATE, SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_CHANGE_SERVICE_OFFERING, 
SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_CHANGE_DISK_OFFERING, SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_RESTART, SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_DESTROY, SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_EXPUNGE, SharedFS.class); + entityEventDetails.put(EVENT_SHAREDFS_RECOVER, SharedFS.class); } public static boolean isNetworkEvent(String eventType) { @@ -1229,4 +1310,8 @@ public class EventTypes { public static boolean isVpcEvent(String eventType) { return EventTypes.EVENT_VPC_CREATE.equals(eventType) || EventTypes.EVENT_VPC_DELETE.equals(eventType); } + + public static void addEntityEventDetail(String event, Class clazz) { + entityEventDetails.put(event, clazz); + } } diff --git a/api/src/main/java/com/cloud/host/Host.java b/api/src/main/java/com/cloud/host/Host.java index 7563bc3b742..56b4ed75a31 100644 --- a/api/src/main/java/com/cloud/host/Host.java +++ b/api/src/main/java/com/cloud/host/Host.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.host; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceState; import com.cloud.utils.fsm.StateObject; @@ -54,6 +55,7 @@ public interface Host extends StateObject, Identity, Partition, HAResour } public static final String HOST_UEFI_ENABLE = "host.uefi.enable"; public static final String HOST_VOLUME_ENCRYPTION = "host.volume.encryption"; + public static final String HOST_INSTANCE_CONVERSION = "host.instance.conversion"; /** * @return name of the machine. 
@@ -207,4 +209,6 @@ public interface Host extends StateObject, Identity, Partition, HAResour boolean isDisabled(); ResourceState getResourceState(); + + CPU.CPUArch getArch(); } diff --git a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java index 3c7dbac6442..0c821b4e36c 100644 --- a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java +++ b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java @@ -23,6 +23,7 @@ import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.framework.config.ConfigKey; import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -101,21 +102,20 @@ public interface HypervisorGuru extends Adapter { * Will generate commands to migrate a vm to a pool. For now this will only work for stopped VMs on Vmware. * * @param vm the stopped vm to migrate - * @param destination the primary storage pool to migrate to + * @param volumeToPool the primary storage pools to migrate to * @return a list of commands to perform for a successful migration */ List finalizeMigrate(VirtualMachine vm, Map volumeToPool); /** - * Will perform a clone of a VM on an external host (if the guru can handle) + * Will return the hypervisor VM (clone VM for PowerOn VMs), performs a clone of a VM if required on an external host (if the guru can handle) * @param hostIp VM's source host IP - * @param vmName name of the source VM to clone from + * @param vmName name of the source VM (clone VM name if cloned) * @param params hypervisor specific additional parameters - * @return a reference to the cloned VM + * @return a reference to the hypervisor or cloned VM, and cloned flag */ - UnmanagedInstanceTO cloneHypervisorVMOutOfBand(String hostIp, String vmName, - Map params); + Pair getHypervisorVMOutOfBandAndCloneIfRequired(String 
hostIp, String vmName, Map params); /** * Removes a VM created as a clone of a VM on an external host @@ -124,6 +124,23 @@ public interface HypervisorGuru extends Adapter { * @param params hypervisor specific additional parameters * @return true if the operation succeeds, false if not */ - boolean removeClonedHypervisorVMOutOfBand(String hostIp, String vmName, - Map params); + boolean removeClonedHypervisorVMOutOfBand(String hostIp, String vmName, Map params); + + /** + * Create an OVA/OVF template of a VM on an external host (if the guru can handle) + * @param hostIp VM's source host IP + * @param vmName name of the source VM to create template from + * @param params hypervisor specific additional parameters + * @param templateLocation datastore to create the template file + * @return the created template dir/name + */ + String createVMTemplateOutOfBand(String hostIp, String vmName, Map params, DataStoreTO templateLocation, int threadsCountToExportOvf); + + /** + * Removes the template on the location + * @param templateLocation datastore to remove the template file + * @param templateDir the template dir to remove from datastore + * @return true if the operation succeeds, false if not + */ + boolean removeVMTemplateOutOfBand(DataStoreTO templateLocation, String templateDir); } diff --git a/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterHelper.java b/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java similarity index 90% rename from api/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterHelper.java rename to api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java index 9bc9cd38632..4d6dec1f08b 100644 --- a/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterHelper.java +++ b/api/src/main/java/com/cloud/kubernetes/cluster/KubernetesServiceHelper.java @@ -16,12 +16,14 @@ // under the License. 
package com.cloud.kubernetes.cluster; -import com.cloud.utils.component.Adapter; import org.apache.cloudstack.acl.ControlledEntity; import java.util.Map; -public interface KubernetesClusterHelper extends Adapter { +import com.cloud.uservm.UserVm; +import com.cloud.utils.component.Adapter; + +public interface KubernetesServiceHelper extends Adapter { enum KubernetesClusterNodeType { CONTROL, WORKER, ETCD, DEFAULT @@ -29,6 +31,7 @@ public interface KubernetesClusterHelper extends Adapter { ControlledEntity findByUuid(String uuid); ControlledEntity findByVmId(long vmId); + void checkVmCanBeDestroyed(UserVm userVm); boolean isValidNodeType(String nodeType); Map getServiceOfferingNodeTypeMap(Map> serviceOfferingNodeTypeMap); Map getTemplateNodeTypeMap(Map> templateNodeTypeMap); diff --git a/api/src/main/java/com/cloud/network/Network.java b/api/src/main/java/com/cloud/network/Network.java index 3b13ef7bd9c..d3bc5005cb7 100644 --- a/api/src/main/java/com/cloud/network/Network.java +++ b/api/src/main/java/com/cloud/network/Network.java @@ -103,7 +103,7 @@ public interface Network extends ControlledEntity, StateObject, I public static final Service Vpn = new Service("Vpn", Capability.SupportedVpnProtocols, Capability.VpnTypes); public static final Service Dhcp = new Service("Dhcp", Capability.ExtraDhcpOptions); public static final Service Dns = new Service("Dns", Capability.AllowDnsSuffixModification); - public static final Service Gateway = new Service("Gateway"); + public static final Service Gateway = new Service("Gateway", Capability.RedundantRouter); public static final Service Firewall = new Service("Firewall", Capability.SupportedProtocols, Capability.MultipleIps, Capability.TrafficStatistics, Capability.SupportedTrafficDirection, Capability.SupportedEgressProtocols); public static final Service Lb = new Service("Lb", Capability.SupportedLBAlgorithms, Capability.SupportedLBIsolation, Capability.SupportedProtocols, @@ -412,12 +412,16 @@ public interface Network 
extends ControlledEntity, StateObject, I String getGateway(); + void setGateway(String gateway); + // "cidr" is the Cloudstack managed address space, all CloudStack managed vms get IP address from "cidr", // In general "cidr" also serves as the network CIDR // But in case IP reservation is configured for a Guest network, "networkcidr" is the Effective network CIDR for that network, // "cidr" will still continue to be the effective address space for CloudStack managed vms in that Guest network String getCidr(); + void setCidr(String cidr); + // "networkcidr" is the network CIDR of the guest network which uses IP reservation. // It is the summation of "cidr" and the reservedIPrange(the address space used for non CloudStack purposes). // For networks not configured with IP reservation, "networkcidr" is always null @@ -503,4 +507,6 @@ public interface Network extends ControlledEntity, StateObject, I Integer getPublicMtu(); Integer getPrivateMtu(); + + Integer getNetworkCidrSize(); } diff --git a/api/src/main/java/com/cloud/network/NetworkModel.java b/api/src/main/java/com/cloud/network/NetworkModel.java index 53ac735cf05..a4cd87af008 100644 --- a/api/src/main/java/com/cloud/network/NetworkModel.java +++ b/api/src/main/java/com/cloud/network/NetworkModel.java @@ -149,7 +149,7 @@ public interface NetworkModel { boolean areServicesSupportedByNetworkOffering(long networkOfferingId, Service... services); - Network getNetworkWithSGWithFreeIPs(Long zoneId); + Network getNetworkWithSGWithFreeIPs(Account account, Long zoneId); Network getNetworkWithSecurityGroupEnabled(Long zoneId); @@ -173,6 +173,8 @@ public interface NetworkModel { boolean isProviderSupportServiceInNetwork(long networkId, Service service, Provider provider); + boolean isAnyServiceSupportedInNetwork(long networkId, Provider provider, Service... 
services); + boolean isProviderEnabledInPhysicalNetwork(long physicalNetowrkId, String providerName); String getNetworkTag(HypervisorType hType, Network network); @@ -317,6 +319,8 @@ public interface NetworkModel { void checkIp6Parameters(String startIPv6, String endIPv6, String ip6Gateway, String ip6Cidr) throws InvalidParameterValueException; + void checkIp6CidrSizeEqualTo64(String ip6Cidr) throws InvalidParameterValueException; + void checkRequestedIpAddresses(long networkId, IpAddresses ips) throws InvalidParameterValueException; String getStartIpv6Address(long id); @@ -354,4 +358,8 @@ public interface NetworkModel { void verifyIp6DnsPair(final String ip6Dns1, final String ip6Dns2); + boolean isSecurityGroupSupportedForZone(Long zoneId); + + boolean checkSecurityGroupSupportForNetwork(Account account, DataCenter zone, List networkIds, + List securityGroupsIds); } diff --git a/api/src/main/java/com/cloud/network/NetworkProfile.java b/api/src/main/java/com/cloud/network/NetworkProfile.java index 1a5c80ea871..83dc247cc9e 100644 --- a/api/src/main/java/com/cloud/network/NetworkProfile.java +++ b/api/src/main/java/com/cloud/network/NetworkProfile.java @@ -41,8 +41,8 @@ public class NetworkProfile implements Network { private final Mode mode; private final BroadcastDomainType broadcastDomainType; private TrafficType trafficType; - private final String gateway; - private final String cidr; + private String gateway; + private String cidr; private final String networkCidr; private final String ip6Gateway; private final String ip6Cidr; @@ -62,6 +62,7 @@ public class NetworkProfile implements Network { private final String guruName; private boolean strechedL2Subnet; private String externalId; + private Integer networkCidrSize; public NetworkProfile(Network network) { id = network.getId(); @@ -98,6 +99,7 @@ public class NetworkProfile implements Network { isRedundant = network.isRedundant(); isRollingRestart = network.isRollingRestart(); externalId = 
network.getExternalId(); + networkCidrSize = network.getNetworkCidrSize(); } @Override @@ -210,11 +212,21 @@ public class NetworkProfile implements Network { return gateway; } + @Override + public void setGateway(String gateway) { + this.gateway = gateway; + } + @Override public String getCidr() { return cidr; } + @Override + public void setCidr(String cidr) { + this.cidr = cidr; + } + @Override public String getNetworkCidr() { return networkCidr; @@ -367,4 +379,9 @@ public class NetworkProfile implements Network { return null; } + @Override + public Integer getNetworkCidrSize() { + return networkCidrSize; + } + } diff --git a/api/src/main/java/com/cloud/network/NetworkService.java b/api/src/main/java/com/cloud/network/NetworkService.java index 02ca13cb9a4..36d58c737cc 100644 --- a/api/src/main/java/com/cloud/network/NetworkService.java +++ b/api/src/main/java/com/cloud/network/NetworkService.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import com.cloud.dc.DataCenter; +import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.command.admin.address.ReleasePodIpCmdByAdmin; import org.apache.cloudstack.api.command.admin.network.DedicateGuestVlanRangeCmd; import org.apache.cloudstack.api.command.admin.network.ListDedicatedGuestVlanRangesCmd; @@ -102,6 +103,10 @@ public interface NetworkService { Network createGuestNetwork(CreateNetworkCmd cmd) throws InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException; + Network createGuestNetwork(long networkOfferingId, String name, String displayText, Account owner, + PhysicalNetwork physicalNetwork, long zoneId, ControlledEntity.ACLType aclType) throws + InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException; + Pair, Integer> searchForNetworks(ListNetworksCmd cmd); boolean deleteNetwork(long networkId, boolean forced); diff --git a/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java 
b/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java index c47500c7849..cb92739d283 100644 --- a/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java +++ b/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java @@ -17,17 +17,22 @@ package com.cloud.network; import java.util.List; +import java.util.Map; import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd; import org.apache.cloudstack.api.command.admin.router.UpgradeRouterTemplateCmd; +import com.cloud.deploy.DeploymentPlanner; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.router.VirtualRouter; import com.cloud.user.Account; import com.cloud.utils.Pair; import com.cloud.vm.Nic; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; public interface VirtualNetworkApplianceService { /** @@ -62,6 +67,10 @@ public interface VirtualNetworkApplianceService { VirtualRouter startRouter(long id) throws ResourceUnavailableException, InsufficientCapacityException, ConcurrentOperationException; + void startRouterForHA(VirtualMachine vm, Map params, DeploymentPlanner planner) + throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, + OperationTimedoutException; + VirtualRouter destroyRouter(long routerId, Account caller, Long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException; VirtualRouter findRouter(long routerId); diff --git a/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java b/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java index 5c3ee3f1032..cd04db802ca 100644 --- a/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java +++ 
b/api/src/main/java/com/cloud/network/VpcVirtualNetworkApplianceService.java @@ -29,7 +29,6 @@ public interface VpcVirtualNetworkApplianceService extends VirtualNetworkApplian /** * @param router * @param network - * @param isRedundant * @param params TODO * @return * @throws ConcurrentOperationException @@ -42,11 +41,30 @@ public interface VpcVirtualNetworkApplianceService extends VirtualNetworkApplian /** * @param router * @param network - * @param isRedundant * @return * @throws ConcurrentOperationException * @throws ResourceUnavailableException */ boolean removeVpcRouterFromGuestNetwork(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException; + + /** + * @param router + * @param network + * @return + * @throws ConcurrentOperationException + * @throws ResourceUnavailableException + */ + boolean stopKeepAlivedOnRouter(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException; + + + /** + * @param router + * @param network + * @return + * @throws ConcurrentOperationException + * @throws ResourceUnavailableException + */ + boolean startKeepAlivedOnRouter(VirtualRouter router, Network network) throws ConcurrentOperationException, ResourceUnavailableException; + } diff --git a/api/src/main/java/com/cloud/network/element/BgpServiceProvider.java b/api/src/main/java/com/cloud/network/element/BgpServiceProvider.java new file mode 100644 index 00000000000..ee919cb1af7 --- /dev/null +++ b/api/src/main/java/com/cloud/network/element/BgpServiceProvider.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.element; + +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.network.vpc.Vpc; + +import org.apache.cloudstack.network.BgpPeer; + +import java.util.List; + +public interface BgpServiceProvider extends NetworkElement { + + boolean applyBgpPeers(Vpc vpc, Network network, List bgpPeers) throws ResourceUnavailableException; + +} diff --git a/api/src/main/java/com/cloud/network/element/LoadBalancingServiceProvider.java b/api/src/main/java/com/cloud/network/element/LoadBalancingServiceProvider.java index 1bb37be970d..dc0f60f4519 100644 --- a/api/src/main/java/com/cloud/network/element/LoadBalancingServiceProvider.java +++ b/api/src/main/java/com/cloud/network/element/LoadBalancingServiceProvider.java @@ -48,4 +48,7 @@ public interface LoadBalancingServiceProvider extends NetworkElement, IpDeployin List updateHealthChecks(Network network, List lbrules); boolean handlesOnlyRulesInTransitionState(); + + default void expungeLbVmRefs(List vmIds, Long batchSize) { + } } diff --git a/api/src/main/java/com/cloud/network/guru/NetworkGuru.java b/api/src/main/java/com/cloud/network/guru/NetworkGuru.java index cbadbb18a8f..7b81c75ed84 100644 --- a/api/src/main/java/com/cloud/network/guru/NetworkGuru.java +++ b/api/src/main/java/com/cloud/network/guru/NetworkGuru.java @@ -212,4 +212,7 @@ public interface NetworkGuru extends Adapter { boolean isMyTrafficType(TrafficType type); + default boolean isSlaacV6Only() { + return true; + } } diff --git 
a/api/src/main/java/com/cloud/network/vpc/VpcOffering.java b/api/src/main/java/com/cloud/network/vpc/VpcOffering.java index 3aab57d5d3d..38263f59667 100644 --- a/api/src/main/java/com/cloud/network/vpc/VpcOffering.java +++ b/api/src/main/java/com/cloud/network/vpc/VpcOffering.java @@ -18,6 +18,7 @@ package com.cloud.network.vpc; import java.util.Date; +import com.cloud.offering.NetworkOffering; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; @@ -57,7 +58,7 @@ public interface VpcOffering extends InternalIdentity, Identity { boolean isForNsx(); - String getNsxMode(); + NetworkOffering.NetworkMode getNetworkMode(); /** * @return service offering id used by VPC virtual router @@ -79,4 +80,8 @@ public interface VpcOffering extends InternalIdentity, Identity { Date getRemoved(); Date getCreated(); + + NetworkOffering.RoutingMode getRoutingMode(); + + Boolean isSpecifyAsNumber(); } diff --git a/api/src/main/java/com/cloud/network/vpc/VpcProvisioningService.java b/api/src/main/java/com/cloud/network/vpc/VpcProvisioningService.java index 1ce3cf8ab0e..10f1ddcc12d 100644 --- a/api/src/main/java/com/cloud/network/vpc/VpcProvisioningService.java +++ b/api/src/main/java/com/cloud/network/vpc/VpcProvisioningService.java @@ -24,6 +24,7 @@ import org.apache.cloudstack.api.command.admin.vpc.CreateVPCOfferingCmd; import org.apache.cloudstack.api.command.admin.vpc.UpdateVPCOfferingCmd; import org.apache.cloudstack.api.command.user.vpc.ListVPCOfferingsCmd; +import com.cloud.offering.NetworkOffering; import com.cloud.utils.Pair; import com.cloud.utils.net.NetUtils; @@ -36,8 +37,10 @@ public interface VpcProvisioningService { VpcOffering createVpcOffering(String name, String displayText, List supportedServices, Map> serviceProviders, Map serviceCapabilitystList, NetUtils.InternetProtocol internetProtocol, - Long serviceOfferingId, Boolean forNsx, String mode, - List domainIds, List zoneIds, VpcOffering.State state); + Long serviceOfferingId, 
Boolean forNsx, NetworkOffering.NetworkMode networkMode, + List domainIds, List zoneIds, VpcOffering.State state, + NetworkOffering.RoutingMode routingMode, boolean specifyAsNumber); + Pair,Integer> listVpcOfferings(ListVPCOfferingsCmd cmd); diff --git a/api/src/main/java/com/cloud/network/vpc/VpcService.java b/api/src/main/java/com/cloud/network/vpc/VpcService.java index 2cdc034a16e..af2a9847a62 100644 --- a/api/src/main/java/com/cloud/network/vpc/VpcService.java +++ b/api/src/main/java/com/cloud/network/vpc/VpcService.java @@ -56,7 +56,8 @@ public interface VpcService { * @throws ResourceAllocationException TODO */ Vpc createVpc(long zoneId, long vpcOffId, long vpcOwnerId, String vpcName, String displayText, String cidr, String networkDomain, - String ip4Dns1, String ip4Dns2, String ip6Dns1, String ip6Dns2, Boolean displayVpc, Integer publicMtu) + String ip4Dns1, String ip4Dns2, String ip6Dns1, String ip6Dns2, Boolean displayVpc, Integer publicMtu, Integer cidrSize, + Long asNumber, List bgpPeerIds) throws ResourceAllocationException; /** @@ -132,6 +133,8 @@ public interface VpcService { */ boolean startVpc(long vpcId, boolean destroyOnFailure) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; + void startVpc(CreateVPCCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; + /** * Shuts down the VPC which includes shutting down all VPC provider and rules cleanup on the backend * diff --git a/api/src/main/java/com/cloud/offering/NetworkOffering.java b/api/src/main/java/com/cloud/offering/NetworkOffering.java index cf01fbf30e2..7011aea679e 100644 --- a/api/src/main/java/com/cloud/offering/NetworkOffering.java +++ b/api/src/main/java/com/cloud/offering/NetworkOffering.java @@ -43,11 +43,15 @@ public interface NetworkOffering extends InfrastructureEntity, InternalIdentity, InternalLbProvider, PublicLbProvider, servicepackageuuid, servicepackagedescription, 
PromiscuousMode, MacAddressChanges, ForgedTransmits, MacLearning, RelatedNetworkOffering, domainid, zoneid, pvlanType, internetProtocol } - public enum NsxMode { + public enum NetworkMode { NATTED, ROUTED } + enum RoutingMode { + Static, Dynamic + } + public final static String SystemPublicNetwork = "System-Public-Network"; public final static String SystemControlNetwork = "System-Control-Network"; public final static String SystemManagementNetwork = "System-Management-Network"; @@ -102,7 +106,7 @@ public interface NetworkOffering extends InfrastructureEntity, InternalIdentity, boolean isForNsx(); - String getNsxMode(); + NetworkMode getNetworkMode(); TrafficType getTrafficType(); @@ -165,4 +169,8 @@ public interface NetworkOffering extends InfrastructureEntity, InternalIdentity, String getServicePackage(); Date getCreated(); + + RoutingMode getRoutingMode(); + + Boolean isSpecifyAsNumber(); } diff --git a/api/src/main/java/com/cloud/offering/ServiceOffering.java b/api/src/main/java/com/cloud/offering/ServiceOffering.java index 58c7b0dbaf9..acb7a9f1cf9 100644 --- a/api/src/main/java/com/cloud/offering/ServiceOffering.java +++ b/api/src/main/java/com/cloud/offering/ServiceOffering.java @@ -33,6 +33,9 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity, static final String internalLbVmDefaultOffUniqueName = "Cloud.Com-InternalLBVm"; // leaving cloud.com references as these are identifyers and no real world addresses (check against DB) + + static final String PURGE_DB_ENTITIES_KEY = "purge.db.entities"; + enum State { Inactive, Active, } diff --git a/api/src/main/java/com/cloud/org/Cluster.java b/api/src/main/java/com/cloud/org/Cluster.java index 4079c88dfde..5124168084c 100644 --- a/api/src/main/java/com/cloud/org/Cluster.java +++ b/api/src/main/java/com/cloud/org/Cluster.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.org; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Managed.ManagedState; import org.apache.cloudstack.kernel.Partition; @@ -38,4 +39,6 @@ public interface Cluster extends Grouping, Partition { AllocationState getAllocationState(); ManagedState getManagedState(); + + CPU.CPUArch getArch(); } diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index c3609cfd8ee..b8df75cd3e4 100644 --- a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -21,6 +21,7 @@ import java.net.UnknownHostException; import java.util.Map; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -29,11 +30,13 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import com.cloud.exception.DiscoveryException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; import 
org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd; @@ -92,6 +95,10 @@ public interface StorageService { StoragePool updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException; + StoragePool enablePrimaryStoragePool(Long id); + + StoragePool disablePrimaryStoragePool(Long id); + StoragePool getStoragePool(long id); boolean deleteImageStore(DeleteImageStoreCmd cmd); @@ -110,6 +117,8 @@ public interface StorageService { */ ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws DiscoveryException; + ImageStore updateImageStore(UpdateImageStoreCmd cmd); + ImageStore updateImageStoreStatus(Long id, Boolean readonly); void updateStorageCapabilities(Long poolId, boolean failOnChecks); @@ -127,4 +136,6 @@ public interface StorageService { boolean deleteObjectStore(DeleteObjectStoragePoolCmd cmd); ObjectStore updateObjectStore(Long id, UpdateObjectStoragePoolCmd cmd); + + void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException; } diff --git a/api/src/main/java/com/cloud/storage/Upload.java b/api/src/main/java/com/cloud/storage/Upload.java index 59d203ac73a..4e696e877cc 100644 --- a/api/src/main/java/com/cloud/storage/Upload.java +++ b/api/src/main/java/com/cloud/storage/Upload.java @@ -40,7 +40,7 @@ public interface Upload extends InternalIdentity, Identity { } public static enum Type { - VOLUME, TEMPLATE, ISO + VOLUME, SNAPSHOT, TEMPLATE, ISO } public static enum Mode { diff --git a/api/src/main/java/com/cloud/storage/Volume.java b/api/src/main/java/com/cloud/storage/Volume.java index 40c5660b2df..c7fbdb0a544 100644 --- a/api/src/main/java/com/cloud/storage/Volume.java +++ b/api/src/main/java/com/cloud/storage/Volume.java @@ -271,11 +271,13 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba void setExternalUuid(String externalUuid); - public Long 
getPassphraseId(); + Long getPassphraseId(); - public void setPassphraseId(Long id); + void setPassphraseId(Long id); - public String getEncryptFormat(); + String getEncryptFormat(); - public void setEncryptFormat(String encryptFormat); + void setEncryptFormat(String encryptFormat); + + boolean isDeleteProtection(); } diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java index 4f09702b7db..6f4c7aa09e2 100644 --- a/api/src/main/java/com/cloud/storage/VolumeApiService.java +++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java @@ -102,8 +102,12 @@ public interface VolumeApiService { boolean deleteVolume(long volumeId, Account caller); + Volume changeDiskOfferingForVolumeInternal(Long volumeId, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean autoMigrateVolume, boolean shrinkOk) throws ResourceAllocationException; + Volume attachVolumeToVM(AttachVolumeCmd command); + Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean allowAttachForSharedFS); + Volume detachVolumeViaDestroyVM(long vmId, long volumeId); Volume detachVolumeFromVM(DetachVolumeCmd cmd); @@ -113,7 +117,9 @@ public interface VolumeApiService { Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType, List zoneIds) throws ResourceAllocationException; - Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean displayVolume, String customId, long owner, String chainInfo, String name); + Volume updateVolume(long volumeId, String path, String state, Long storageId, + Boolean displayVolume, Boolean deleteProtection, + String customId, long owner, String chainInfo, String name); /** * Extracts the volume to a particular location. 
diff --git a/api/src/main/java/com/cloud/storage/snapshot/SnapshotApiService.java b/api/src/main/java/com/cloud/storage/snapshot/SnapshotApiService.java index 0893f337ce2..67afd6aa4e2 100644 --- a/api/src/main/java/com/cloud/storage/snapshot/SnapshotApiService.java +++ b/api/src/main/java/com/cloud/storage/snapshot/SnapshotApiService.java @@ -21,6 +21,7 @@ import java.util.List; import org.apache.cloudstack.api.command.user.snapshot.CopySnapshotCmd; import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; +import org.apache.cloudstack.api.command.user.snapshot.ExtractSnapshotCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; import org.apache.cloudstack.api.command.user.snapshot.UpdateSnapshotPolicyCmd; @@ -106,6 +107,16 @@ public interface SnapshotApiService { */ Snapshot createSnapshot(Long volumeId, Long policyId, Long snapshotId, Account snapshotOwner); + /** + * Extracts the snapshot to a particular location. + * + * @param cmd + * the command specifying url (where the snapshot needs to be extracted to), zoneId (zone where the snapshot exists) and + * id (the id of the snapshot) + * + */ + String extractSnapshot(ExtractSnapshotCmd cmd); + /** * Archives a snapshot from primary storage to secondary storage. 
* @param id Snapshot ID diff --git a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java index 6ed6ae0932d..89953d225a0 100644 --- a/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java +++ b/api/src/main/java/com/cloud/template/VirtualMachineTemplate.java @@ -19,6 +19,7 @@ package com.cloud.template; import java.util.Date; import java.util.Map; +import com.cloud.cpu.CPU; import com.cloud.user.UserData; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.Identity; @@ -150,4 +151,6 @@ public interface VirtualMachineTemplate extends ControlledEntity, Identity, Inte UserData.UserDataOverridePolicy getUserDataOverridePolicy(); + CPU.CPUArch getArch(); + } diff --git a/api/src/main/java/com/cloud/user/ResourceLimitService.java b/api/src/main/java/com/cloud/user/ResourceLimitService.java index ba19719ea8d..2f4ad1347be 100644 --- a/api/src/main/java/com/cloud/user/ResourceLimitService.java +++ b/api/src/main/java/com/cloud/user/ResourceLimitService.java @@ -38,11 +38,18 @@ public interface ResourceLimitService { static final ConfigKey MaxProjectSecondaryStorage = new ConfigKey<>("Project Defaults", Long.class, "max.project.secondary.storage", "400", "The default maximum secondary storage space (in GiB) that can be used for a project", false); static final ConfigKey ResourceCountCheckInterval = new ConfigKey<>("Advanced", Long.class, "resourcecount.check.interval", "300", - "Time (in seconds) to wait before running resource recalculation and fixing task. Default is 300 seconds, Setting this to 0 disables execution of the task", true); + "Time (in seconds) to wait before running resource recalculation and fixing tasks like stale resource reservation cleanup" + + ". 
Default is 300 seconds, Setting this to 0 disables execution of the task", true); + static final ConfigKey ResourceReservationCleanupDelay = new ConfigKey<>("Advanced", Long.class, "resource.reservation.cleanup.delay", "3600", + "Time (in seconds) after which a resource reservation gets deleted. Default is 3600 seconds, Setting this to 0 disables execution of the task", true); static final ConfigKey ResourceLimitHostTags = new ConfigKey<>("Advanced", String.class, "resource.limit.host.tags", "", "A comma-separated list of tags for host resource limits", true); static final ConfigKey ResourceLimitStorageTags = new ConfigKey<>("Advanced", String.class, "resource.limit.storage.tags", "", "A comma-separated list of tags for storage resource limits", true); + static final ConfigKey DefaultMaxAccountProjects = new ConfigKey<>("Account Defaults",Long.class,"max.account.projects","10", + "The default maximum number of projects that can be created for an account",false); + static final ConfigKey DefaultMaxDomainProjects = new ConfigKey<>("Domain Defaults",Long.class,"max.domain.projects","50", + "The default maximum number of projects that can be created for a domain",false); static final List HostTagsSupportingTypes = List.of(ResourceType.user_vm, ResourceType.cpu, ResourceType.memory); static final List StorageTagsSupportingTypes = List.of(ResourceType.volume, ResourceType.primary_storage); diff --git a/api/src/main/java/com/cloud/uservm/UserVm.java b/api/src/main/java/com/cloud/uservm/UserVm.java index e30f5e03054..9035d2903c9 100644 --- a/api/src/main/java/com/cloud/uservm/UserVm.java +++ b/api/src/main/java/com/cloud/uservm/UserVm.java @@ -48,4 +48,6 @@ public interface UserVm extends VirtualMachine, ControlledEntity { void setAccountId(long accountId); public boolean isDisplayVm(); + + String getUserVmType(); } diff --git a/api/src/main/java/com/cloud/vm/NicProfile.java b/api/src/main/java/com/cloud/vm/NicProfile.java index d3c1daa1f5d..183c8dcb2d5 100644 --- 
a/api/src/main/java/com/cloud/vm/NicProfile.java +++ b/api/src/main/java/com/cloud/vm/NicProfile.java @@ -62,6 +62,7 @@ public class NicProfile implements InternalIdentity, Serializable { String iPv4Dns1; String iPv4Dns2; String requestedIPv4; + boolean ipv4AllocationRaceCheck; // IPv6 String iPv6Address; @@ -405,6 +406,13 @@ public class NicProfile implements InternalIdentity, Serializable { this.mtu = mtu; } + public boolean getIpv4AllocationRaceCheck() { + return this.ipv4AllocationRaceCheck; + } + + public void setIpv4AllocationRaceCheck(boolean ipv4AllocationRaceCheck) { + this.ipv4AllocationRaceCheck = ipv4AllocationRaceCheck; + } // // OTHER METHODS diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index 9d8b196a4ff..df7cdca5646 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -20,6 +20,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import com.cloud.deploy.DeploymentPlan; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; @@ -42,9 +43,11 @@ import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; import com.cloud.dc.DataCenter; +import com.cloud.deploy.DeploymentPlanner; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ManagementServerException; +import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.StorageUnavailableException; @@ -66,10 +69,7 @@ public interface UserVmService { /** * Destroys one virtual machine * - * @param userId - * the id of 
the user performing the action - * @param vmId - * the id of the virtual machine. + * @param cmd the API Command Object containg the parameters to use for this service action * @throws ConcurrentOperationException * @throws ResourceUnavailableException */ @@ -112,6 +112,12 @@ public interface UserVmService { UserVm rebootVirtualMachine(RebootVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ResourceAllocationException; + void startVirtualMachine(UserVm vm, DeploymentPlan plan) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException; + + void startVirtualMachineForHA(VirtualMachine vm, Map params, + DeploymentPlanner planner) throws InsufficientCapacityException, ResourceUnavailableException, + ConcurrentOperationException, OperationTimedoutException; + UserVm updateVirtualMachine(UpdateVMCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException; /** @@ -148,14 +154,6 @@ public interface UserVmService { * Creates a Basic Zone User VM in the database and returns the VM to the * caller. * - * - * - * @param sshKeyPair - * - name of the ssh key pair used to login to the virtual - * machine - * @param cpuSpeed - * @param memory - * @param cpuNumber * @param zone * - availability zone for the virtual machine * @param serviceOffering @@ -231,9 +229,6 @@ public interface UserVmService { * Creates a User VM in Advanced Zone (Security Group feature is enabled) in * the database and returns the VM to the caller. * - * - * - * @param type * @param zone * - availability zone for the virtual machine * @param serviceOffering @@ -309,14 +304,6 @@ public interface UserVmService { * Creates a User VM in Advanced Zone (Security Group feature is disabled) * in the database and returns the VM to the caller. 
* - * - * - * @param sshKeyPair - * - name of the ssh key pair used to login to the virtual - * machine - * @param cpuSpeed - * @param memory - * @param cpuNumber * @param zone * - availability zone for the virtual machine * @param serviceOffering diff --git a/api/src/main/java/com/cloud/vm/VirtualMachine.java b/api/src/main/java/com/cloud/vm/VirtualMachine.java index e7c5efb773b..e2ea408e7b8 100644 --- a/api/src/main/java/com/cloud/vm/VirtualMachine.java +++ b/api/src/main/java/com/cloud/vm/VirtualMachine.java @@ -333,6 +333,8 @@ public interface VirtualMachine extends RunningOn, ControlledEntity, Partition, */ Date getCreated(); + Date getRemoved(); + long getServiceOfferingId(); Long getBackupOfferingId(); diff --git a/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java b/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java index f2ff3da8449..c67ee4eabc2 100644 --- a/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java +++ b/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java @@ -192,6 +192,10 @@ public interface VirtualMachineProfile { Map getParameters(); + void setCpuOvercommitRatio(Float cpuOvercommitRatio); + + void setMemoryOvercommitRatio(Float memoryOvercommitRatio); + Float getCpuOvercommitRatio(); Float getMemoryOvercommitRatio(); diff --git a/api/src/main/java/org/apache/cloudstack/acl/Role.java b/api/src/main/java/org/apache/cloudstack/acl/Role.java index 5e5ffd583d8..ce4166ef4c8 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/Role.java +++ b/api/src/main/java/org/apache/cloudstack/acl/Role.java @@ -21,7 +21,18 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; public interface Role extends RoleEntity, InternalIdentity, Identity { + + enum State { + ENABLED, DISABLED; + + @Override + public String toString(){ + return super.toString().toLowerCase(); + } + } + RoleType getRoleType(); boolean isDefault(); boolean isPublicRole(); + State getState(); } diff --git 
a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java index 07f62a7e7f8..68204d43253 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java +++ b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java @@ -54,6 +54,10 @@ public interface RoleService { boolean deleteRole(Role role); + boolean enableRole(Role role); + + boolean disableRole(Role role); + RolePermission findRolePermission(Long id); RolePermission findRolePermissionByRoleIdAndRule(Long roleId, String rule); @@ -76,7 +80,7 @@ public interface RoleService { */ List listRoles(); - Pair, Integer> listRoles(Long startIndex, Long limit); + Pair, Integer> listRoles(String state, Long startIndex, Long limit); /** * Find all roles that have the giving {@link String} as part of their name. @@ -84,14 +88,14 @@ public interface RoleService { */ List findRolesByName(String name); - Pair, Integer> findRolesByName(String name, String keyword, Long startIndex, Long limit); + Pair, Integer> findRolesByName(String name, String keyword, String state, Long startIndex, Long limit); /** * Find all roles by {@link RoleType}. If the role type is {@link RoleType#Admin}, the calling account must be a root admin, otherwise we return an empty list. 
*/ List findRolesByType(RoleType roleType); - Pair, Integer> findRolesByType(RoleType roleType, Long startIndex, Long limit); + Pair, Integer> findRolesByType(RoleType roleType, String state, Long startIndex, Long limit); List findAllPermissionsBy(Long roleId); diff --git a/api/src/main/java/org/apache/cloudstack/affinity/AffinityGroupResponse.java b/api/src/main/java/org/apache/cloudstack/affinity/AffinityGroupResponse.java index 22842b834fe..69f391a5656 100644 --- a/api/src/main/java/org/apache/cloudstack/affinity/AffinityGroupResponse.java +++ b/api/src/main/java/org/apache/cloudstack/affinity/AffinityGroupResponse.java @@ -25,6 +25,7 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.EntityReference; import org.apache.cloudstack.api.response.ControlledViewEntityResponse; +import org.apache.cloudstack.dedicated.DedicatedResourceResponse; import com.cloud.serializer.Param; @@ -56,6 +57,10 @@ public class AffinityGroupResponse extends BaseResponse implements ControlledVie @Param(description = "the domain name of the affinity group") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the affinity group belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.PROJECT_ID) @Param(description = "the project ID of the affinity group") private String projectId; @@ -72,6 +77,10 @@ public class AffinityGroupResponse extends BaseResponse implements ControlledVie @Param(description = "virtual machine IDs associated with this affinity group") private List vmIdList; + @SerializedName("dedicatedresources") + @Param(description = "dedicated resources associated with this affinity group") + private List dedicatedResources; + public AffinityGroupResponse() { } @@ -115,6 +124,11 @@ public class AffinityGroupResponse extends BaseResponse implements ControlledVie this.domainName = domainName; } + @Override + 
public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public int hashCode() { final int prime = 31; @@ -162,4 +176,12 @@ public class AffinityGroupResponse extends BaseResponse implements ControlledVie this.vmIdList.add(vmId); } + public void addDedicatedResource(DedicatedResourceResponse dedicatedResourceResponse) { + if (this.dedicatedResources == null) { + this.dedicatedResources = new ArrayList<>(); + } + + this.dedicatedResources.add(dedicatedResourceResponse); + } + } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiArgValidator.java b/api/src/main/java/org/apache/cloudstack/api/ApiArgValidator.java index 3e06fc0e44e..38047235273 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiArgValidator.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiArgValidator.java @@ -32,4 +32,9 @@ public enum ApiArgValidator { * Validates if the parameter is an UUID with the method {@link UuidUtils#isUuid(String)}. */ UuidString, + + /** + * Validates if the parameter is a valid RFC Compliance domain name. 
+ */ + RFCComplianceDomainName, } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java b/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java index 38efa428726..a5bedc65d9c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiCommandResourceType.java @@ -17,7 +17,9 @@ package org.apache.cloudstack.api; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.cloudstack.region.PortableIp; import org.apache.commons.collections.CollectionUtils; @@ -82,15 +84,22 @@ public enum ApiCommandResourceType { ObjectStore(org.apache.cloudstack.storage.object.ObjectStore.class), Bucket(org.apache.cloudstack.storage.object.Bucket.class), QuotaTariff(org.apache.cloudstack.quota.QuotaTariff.class), - KubernetesCluster(com.cloud.kubernetes.cluster.KubernetesCluster.class); + KubernetesCluster(com.cloud.kubernetes.cluster.KubernetesCluster.class), + KubernetesSupportedVersion(null), + SharedFS(org.apache.cloudstack.storage.sharedfs.SharedFS.class); private final Class clazz; + static final Map> additionalClassMappings = new HashMap<>(); + private ApiCommandResourceType(Class clazz) { this.clazz = clazz; } public Class getAssociatedClass() { + if (this.clazz == null && additionalClassMappings.containsKey(this)) { + return additionalClassMappings.get(this); + } return this.clazz; } @@ -120,4 +129,8 @@ public enum ApiCommandResourceType { } return null; } + + public static void setClassMapping(ApiCommandResourceType type, Class clazz) { + additionalClassMappings.put(type, clazz); + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 7943ed0b1ee..cf667063e38 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java 
@@ -29,10 +29,18 @@ public class ApiConstants { public static final String ADDRESS = "address"; public static final String ALGORITHM = "algorithm"; public static final String ALIAS = "alias"; + public static final String ALLOCATED_DATE = "allocateddate"; public static final String ALLOCATED_ONLY = "allocatedonly"; + public static final String ALLOCATED_TIME = "allocated"; + public static final String ALLOW_USER_FORCE_STOP_VM = "allowuserforcestopvm"; public static final String ANNOTATION = "annotation"; public static final String API_KEY = "apikey"; public static final String ARCHIVED = "archived"; + public static final String ARCH = "arch"; + public static final String AS_NUMBER = "asnumber"; + public static final String AS_NUMBER_ID = "asnumberid"; + public static final String ASN_RANGE = "asnrange"; + public static final String ASN_RANGE_ID = "asnrangeid"; public static final String ASYNC_BACKUP = "asyncbackup"; public static final String AUTO_SELECT = "autoselect"; public static final String USER_API_KEY = "userapikey"; @@ -46,6 +54,8 @@ public class ApiConstants { public static final String BACKUP_OFFERING_NAME = "backupofferingname"; public static final String BACKUP_OFFERING_ID = "backupofferingid"; public static final String BASE64_IMAGE = "base64image"; + public static final String BGP_PEERS = "bgppeers"; + public static final String BGP_PEER_IDS = "bgppeerids"; public static final String BITS = "bits"; public static final String BOOTABLE = "bootable"; public static final String BIND_DN = "binddn"; @@ -87,6 +97,8 @@ public class ApiConstants { public static final String DNS_SEARCH_ORDER = "dnssearchorder"; public static final String CHAIN_INFO = "chaininfo"; public static final String CIDR = "cidr"; + public static final String CIDR_SIZE = "cidrsize"; + public static final String IP6_CIDR = "ip6cidr"; public static final String CIDR_LIST = "cidrlist"; public static final String DEST_CIDR_LIST = "destcidrlist"; @@ -127,6 +139,7 @@ public class ApiConstants 
{ public static final String DATACENTER_NAME = "datacentername"; public static final String DATADISK_OFFERING_LIST = "datadiskofferinglist"; public static final String DEFAULT_VALUE = "defaultvalue"; + public static final String DELETE_PROTECTION = "deleteprotection"; public static final String DESCRIPTION = "description"; public static final String DESTINATION = "destination"; public static final String DESTINATION_ZONE_ID = "destzoneid"; @@ -171,11 +184,14 @@ public class ApiConstants { public static final String DURATION = "duration"; public static final String ELIGIBLE = "eligible"; public static final String EMAIL = "email"; + public static final String END_ASN = "endasn"; public static final String END_DATE = "enddate"; public static final String END_IP = "endip"; public static final String END_IPV6 = "endipv6"; public static final String END_PORT = "endport"; public static final String ENTRY_TIME = "entrytime"; + public static final String EVENT_ID = "eventid"; + public static final String EVENT_TYPE = "eventtype"; public static final String EXPIRES = "expires"; public static final String EXTRA_CONFIG = "extraconfig"; public static final String EXTRA_DHCP_OPTION = "extradhcpoption"; @@ -186,10 +202,12 @@ public class ApiConstants { public static final String EXTERNAL_UUID = "externaluuid"; public static final String FENCE = "fence"; public static final String FETCH_LATEST = "fetchlatest"; + public static final String FILESYSTEM = "filesystem"; public static final String FIRSTNAME = "firstname"; public static final String FORCED = "forced"; public static final String FORCED_DESTROY_LOCAL_STORAGE = "forcedestroylocalstorage"; public static final String FORCE_DELETE_HOST = "forcedeletehost"; + public static final String FORCE_MS_TO_IMPORT_VM_FILES = "forcemstoimportvmfiles"; public static final String FORMAT = "format"; public static final String FOR_VIRTUAL_NETWORK = "forvirtualnetwork"; public static final String FOR_SYSTEM_VMS = "forsystemvms"; @@ -210,6 
+228,7 @@ public class ApiConstants { public static final String HA_PROVIDER = "haprovider"; public static final String HA_STATE = "hastate"; public static final String HEALTH = "health"; + public static final String HEADERS = "headers"; public static final String HIDE_IP_ADDRESS_USAGE = "hideipaddressusage"; public static final String HOST_ID = "hostid"; public static final String HOST_IDS = "hostids"; @@ -237,6 +256,7 @@ public class ApiConstants { public static final String NEXT_ACL_RULE_ID = "nextaclruleid"; public static final String MOVE_ACL_CONSISTENCY_HASH = "aclconsistencyhash"; public static final String IMAGE_PATH = "imagepath"; + public static final String INSTANCE_CONVERSION_SUPPORTED = "instanceconversionsupported"; public static final String INTERNAL_DNS1 = "internaldns1"; public static final String INTERNAL_DNS2 = "internaldns2"; public static final String INTERNET_PROTOCOL = "internetprotocol"; @@ -264,6 +284,7 @@ public class ApiConstants { public static final String IS_CLEANUP_REQUIRED = "iscleanuprequired"; public static final String IS_DYNAMIC = "isdynamic"; public static final String IS_EDGE = "isedge"; + public static final String IS_ENCRYPTED = "isencrypted"; public static final String IS_EXTRACTABLE = "isextractable"; public static final String IS_FEATURED = "isfeatured"; public static final String IS_IMPLICIT = "isimplicit"; @@ -282,6 +303,7 @@ public class ApiConstants { public static final String JOB_STATUS = "jobstatus"; public static final String KEEPALIVE_ENABLED = "keepaliveenabled"; public static final String KERNEL_VERSION = "kernelversion"; + public static final String KEY = "key"; public static final String LABEL = "label"; public static final String LASTNAME = "lastname"; public static final String LAST_BOOT = "lastboottime"; @@ -309,7 +331,9 @@ public class ApiConstants { public static final String MEMORY = "memory"; public static final String MODE = "mode"; public static final String MOUNT_CKS_ISO_ON_VR = "mountcksisoonvr"; + 
public static final String MULTI_ARCH = "ismultiarch"; public static final String NSX_MODE = "nsxmode"; + public static final String NETWORK_MODE = "networkmode"; public static final String NSX_ENABLED = "isnsxenabled"; public static final String NAME = "name"; public static final String METHOD_NAME = "methodname"; @@ -351,6 +375,7 @@ public class ApiConstants { public static final String PARENT = "parent"; public static final String PARENT_ID = "parentid"; public static final String PARENT_DOMAIN_ID = "parentdomainid"; + public static final String PARENT_SUBNET = "parentsubnet"; public static final String PARENT_TEMPLATE_ID = "parenttemplateid"; public static final String PASSWORD = "password"; public static final String CURRENT_PASSWORD = "currentpassword"; @@ -359,6 +384,7 @@ public class ApiConstants { public static final String SSHKEY_ENABLED = "sshkeyenabled"; public static final String PATH = "path"; public static final String PAYLOAD = "payload"; + public static final String PAYLOAD_URL = "payloadurl"; public static final String POD_ID = "podid"; public static final String POD_NAME = "podname"; public static final String POD_IDS = "podids"; @@ -386,6 +412,7 @@ public class ApiConstants { public static final String PUBLIC_START_PORT = "publicport"; public static final String PUBLIC_END_PORT = "publicendport"; public static final String PUBLIC_ZONE = "publiczone"; + public static final String PURGE_RESOURCES = "purgeresources"; public static final String RECEIVED_BYTES = "receivedbytes"; public static final String RECONNECT = "reconnect"; public static final String RECOVER = "recover"; @@ -404,11 +431,9 @@ public class ApiConstants { public static final String QUERY_FILTER = "queryfilter"; public static final String SCHEDULE = "schedule"; public static final String SCOPE = "scope"; - public static final String SECRET_KEY = "usersecretkey"; - public static final String SECONDARY_IP = "secondaryip"; - public static final String SINCE = "since"; - public static 
final String KEY = "key"; public static final String SEARCH_BASE = "searchbase"; + public static final String SECONDARY_IP = "secondaryip"; + public static final String SECRET_KEY = "secretkey"; public static final String SECURITY_GROUP_IDS = "securitygroupids"; public static final String SECURITY_GROUP_NAMES = "securitygroupnames"; public static final String SECURITY_GROUP_NAME = "securitygroupname"; @@ -426,15 +451,17 @@ public class ApiConstants { public static final String SHOW_UNIQUE = "showunique"; public static final String SIGNATURE = "signature"; public static final String SIGNATURE_VERSION = "signatureversion"; + public static final String SINCE = "since"; public static final String SIZE = "size"; + public static final String SIZEGB = "sizegb"; public static final String SNAPSHOT = "snapshot"; public static final String SNAPSHOT_ID = "snapshotid"; public static final String SNAPSHOT_POLICY_ID = "snapshotpolicyid"; public static final String SNAPSHOT_TYPE = "snapshottype"; public static final String SNAPSHOT_QUIESCEVM = "quiescevm"; public static final String SOURCE_ZONE_ID = "sourcezoneid"; - public static final String SUITABLE_FOR_VM = "suitableforvirtualmachine"; - public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot"; + public static final String SSL_VERIFICATION = "sslverification"; + public static final String START_ASN = "startasn"; public static final String START_DATE = "startdate"; public static final String START_ID = "startid"; public static final String START_IP = "startip"; @@ -447,12 +474,16 @@ public class ApiConstants { public static final String STORAGE_POLICY = "storagepolicy"; public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled"; public static final String STORAGE_CAPABILITIES = "storagecapabilities"; + public static final String STORAGE_CUSTOM_STATS = "storagecustomstats"; public static final String SUBNET = "subnet"; public static final String OWNER = "owner"; public static final String 
SWAP_OWNER = "swapowner"; public static final String SYSTEM_VM_TYPE = "systemvmtype"; public static final String TAGS = "tags"; public static final String STORAGE_TAGS = "storagetags"; + public static final String SUCCESS = "success"; + public static final String SUITABLE_FOR_VM = "suitableforvirtualmachine"; + public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot"; public static final String TARGET_IQN = "targetiqn"; public static final String TEMPLATE_FILTER = "templatefilter"; public static final String TEMPLATE_ID = "templateid"; @@ -486,6 +517,7 @@ public class ApiConstants { public static final String USERNAME = "username"; public static final String USER_CONFIGURABLE = "userconfigurable"; public static final String USER_SECURITY_GROUP_LIST = "usersecuritygrouplist"; + public static final String USER_SECRET_KEY = "usersecretkey"; public static final String USE_VIRTUAL_NETWORK = "usevirtualnetwork"; public static final String UPDATE_IN_SEQUENCE = "updateinsequence"; public static final String VALUE = "value"; @@ -495,6 +527,7 @@ public class ApiConstants { public static final String VIRTUAL_MACHINE_ID_IP = "vmidipmap"; public static final String VIRTUAL_MACHINE_COUNT = "virtualmachinecount"; public static final String VIRTUAL_MACHINE_TYPE = "virtualmachinetype"; + public static final String VIRTUAL_MACHINE_STATE = "vmstate"; public static final String VIRTUAL_MACHINES = "virtualmachines"; public static final String USAGE_ID = "usageid"; public static final String USAGE_TYPE = "usagetype"; @@ -513,6 +546,7 @@ public class ApiConstants { public static final String ISOLATED_PVLAN = "isolatedpvlan"; public static final String ISOLATED_PVLAN_TYPE = "isolatedpvlantype"; public static final String ISOLATION_URI = "isolationuri"; + public static final String IS_ALLOCATED = "isallocated"; public static final String IS_DEDICATED = "isdedicated"; public static final String TAKEN = "taken"; public static final String VM_AVAILABLE = "vmavailable"; 
@@ -541,6 +575,7 @@ public class ApiConstants { public static final String NETWORK_ID = "networkid"; public static final String NETWORK_FILTER = "networkfilter"; public static final String NIC_ID = "nicid"; + public static final String SPECIFY_AS_NUMBER = "specifyasnumber"; public static final String SPECIFY_VLAN = "specifyvlan"; public static final String IS_DEFAULT = "isdefault"; public static final String IS_SYSTEM = "issystem"; @@ -571,6 +606,7 @@ public class ApiConstants { public static final String ALLOCATION_STATE = "allocationstate"; public static final String MANAGED_STATE = "managedstate"; public static final String MANAGEMENT_SERVER_ID = "managementserverid"; + public static final String MANAGEMENT_SERVER_NAME = "managementservername"; public static final String STORAGE = "storage"; public static final String STORAGE_ID = "storageid"; public static final String PING_STORAGE_SERVER_IP = "pingstorageserverip"; @@ -590,6 +626,7 @@ public class ApiConstants { public static final String AGGREGATE_NAME = "aggregatename"; public static final String POOL_NAME = "poolname"; public static final String VOLUME_NAME = "volumename"; + public static final String VOLUME_STATE = "volumestate"; public static final String SNAPSHOT_POLICY = "snapshotpolicy"; public static final String SNAPSHOT_RESERVATION = "snapshotreservation"; public static final String IP_NETWORK_LIST = "iptonetworklist"; @@ -680,6 +717,8 @@ public class ApiConstants { public static final String ASSOCIATED_NETWORK = "associatednetwork"; public static final String ASSOCIATED_NETWORK_ID = "associatednetworkid"; public static final String ASSOCIATED_NETWORK_NAME = "associatednetworkname"; + public static final String ASSOCIATED_VPC_ID = "associatedvpcid"; + public static final String ASSOCIATED_VPC_NAME = "associatedvpcname"; public static final String SOURCE_NAT_SUPPORTED = "sourcenatsupported"; public static final String RESOURCE_STATE = "resourcestate"; public static final String 
PROJECT_INVITE_REQUIRED = "projectinviterequired"; @@ -692,8 +731,11 @@ public class ApiConstants { public static final String TRAFFIC_TYPE_IMPLEMENTOR = "traffictypeimplementor"; public static final String KEYWORD = "keyword"; public static final String LIST_ALL = "listall"; + public static final String LIST_ONLY_REMOVED = "listonlyremoved"; public static final String LIST_SYSTEM_VMS = "listsystemvms"; public static final String IP_RANGES = "ipranges"; + public static final String IPV4_ROUTING = "ip4routing"; + public static final String IPV4_ROUTES = "ip4routes"; public static final String IPV6_ROUTING = "ip6routing"; public static final String IPV6_ROUTES = "ip6routes"; public static final String SPECIFY_IP_RANGES = "specifyipranges"; @@ -911,6 +953,7 @@ public class ApiConstants { public static final String AUTOSCALE_VMGROUP_NAME = "autoscalevmgroupname"; public static final String BAREMETAL_DISCOVER_NAME = "baremetaldiscovername"; public static final String BAREMETAL_RCT_URL = "baremetalrcturl"; + public static final String BATCH_SIZE = "batchsize"; public static final String UCS_DN = "ucsdn"; public static final String GSLB_PROVIDER = "gslbprovider"; public static final String EXCLUSIVE_GSLB_PROVIDER = "isexclusivegslbprovider"; @@ -955,6 +998,7 @@ public class ApiConstants { public static final String NUMBER = "number"; public static final String IS_DYNAMICALLY_SCALABLE = "isdynamicallyscalable"; public static final String ROUTING = "isrouting"; + public static final String ROUTING_MODE = "routingmode"; public static final String MAX_CONNECTIONS = "maxconnections"; public static final String SERVICE_STATE = "servicestate"; @@ -1138,6 +1182,20 @@ public class ApiConstants { public static final String PARAMETER_DESCRIPTION_IS_TAG_A_RULE = "Whether the informed tag is a JS interpretable rule or not."; + public static final String WEBHOOK_ID = "webhookid"; + public static final String WEBHOOK_NAME = "webhookname"; + + public static final String NFS_MOUNT_OPTIONS 
= "nfsmountopts"; + public static final String MOUNT_OPTIONS = "mountopts"; + + public static final String SHAREDFSVM_MIN_CPU_COUNT = "sharedfsvmmincpucount"; + public static final String SHAREDFSVM_MIN_RAM_SIZE = "sharedfsvmminramsize"; + + public static final String PARAMETER_DESCRIPTION_ACTIVATION_RULE = "Quota tariff's activation rule. It can receive a JS script that results in either " + + "a boolean or a numeric value: if it results in a boolean value, the tariff value will be applied according to the result; if it results in a numeric value, the " + + "numeric value will be applied; if the result is neither a boolean nor a numeric value, the tariff will not be applied. If the rule is not informed, the tariff " + + "value will be applied."; + /** * This enum specifies IO Drivers, each option controls specific policies on I/O. * Qemu guests support "threads" and "native" options Since 0.8.8 ; "io_uring" is supported Since 6.3.0 (QEMU 5.0). diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiServerService.java b/api/src/main/java/org/apache/cloudstack/api/ApiServerService.java index 54fda7e36b8..cbbcdc3bda4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiServerService.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiServerService.java @@ -21,7 +21,9 @@ import java.util.Map; import javax.servlet.http.HttpSession; +import com.cloud.domain.Domain; import com.cloud.exception.CloudAuthenticationException; +import com.cloud.user.UserAccount; public interface ApiServerService { public boolean verifyRequest(Map requestParameters, Long userId, InetAddress remoteAddress) throws ServerApiException; @@ -42,4 +44,8 @@ public interface ApiServerService { public String handleRequest(Map params, String responseType, StringBuilder auditTrailSb) throws ServerApiException; public Class getCmdClass(String cmdName); + + boolean forgotPassword(UserAccount userAccount, Domain domain); + + boolean resetPassword(UserAccount userAccount, String token, String 
password); } diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java index b206cd011c1..457afdc8847 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java @@ -31,6 +31,7 @@ import java.util.regex.Pattern; import javax.inject.Inject; +import com.cloud.bgp.BGPService; import org.apache.cloudstack.acl.ProjectRoleService; import org.apache.cloudstack.acl.RoleService; import org.apache.cloudstack.acl.RoleType; @@ -38,6 +39,7 @@ import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.cloudstack.network.lb.ApplicationLoadBalancerService; import org.apache.cloudstack.network.lb.InternalLoadBalancerVMService; import org.apache.cloudstack.query.QueryService; @@ -217,7 +219,11 @@ public abstract class BaseCmd { public VnfTemplateManager vnfTemplateManager; @Inject public BucketApiService _bucketService; + @Inject + public BGPService bgpService; + @Inject + public RoutedIpv4Manager routedIpv4Manager; public abstract void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException; diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java index e3aead6881b..9a8282df112 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java @@ -16,8 +16,10 @@ // under the License. 
package org.apache.cloudstack.api; +import com.cloud.cpu.CPU; import org.apache.cloudstack.api.response.GuestOSResponse; import org.apache.cloudstack.api.response.TemplateResponse; +import org.apache.commons.lang3.StringUtils; import java.util.Collection; import java.util.Map; @@ -77,6 +79,11 @@ public abstract class BaseUpdateTemplateOrIsoCmd extends BaseCmd { description = "optional boolean field, which indicates if details should be cleaned up or not (if set to true, details removed for this resource, details field ignored; if false or not set, no action)") private Boolean cleanupDetails; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, + description = "the CPU arch of the template/ISO. Valid options are: x86_64, aarch64", + since = "4.20") + private String arch; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -141,4 +148,11 @@ public abstract class BaseUpdateTemplateOrIsoCmd extends BaseCmd { public boolean isCleanupDetails(){ return cleanupDetails == null ? 
false : cleanupDetails.booleanValue(); } + + public CPU.CPUArch getCPUArch() { + if (StringUtils.isBlank(arch)) { + return null; + } + return CPU.CPUArch.fromType(arch); + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java index ef759aaf9c3..ea0d946ee41 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java @@ -22,6 +22,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import com.cloud.bgp.ASNumber; +import com.cloud.bgp.ASNumberRange; + import org.apache.cloudstack.storage.object.Bucket; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -31,11 +34,14 @@ import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.command.user.job.QueryAsyncJobResultCmd; import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.ApplicationLoadBalancerResponse; +import org.apache.cloudstack.api.response.ASNRangeResponse; +import org.apache.cloudstack.api.response.ASNumberResponse; import org.apache.cloudstack.api.response.AsyncJobResponse; import org.apache.cloudstack.api.response.AutoScalePolicyResponse; import org.apache.cloudstack.api.response.AutoScaleVmGroupResponse; import org.apache.cloudstack.api.response.AutoScaleVmProfileResponse; import org.apache.cloudstack.api.response.BackupOfferingResponse; +import org.apache.cloudstack.api.response.BackupRepositoryResponse; import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.api.response.BackupScheduleResponse; import org.apache.cloudstack.api.response.BucketResponse; @@ -54,6 +60,7 @@ import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.DomainRouterResponse; import 
org.apache.cloudstack.api.response.EventResponse; import org.apache.cloudstack.api.response.ExtractResponse; +import org.apache.cloudstack.api.response.SharedFSResponse; import org.apache.cloudstack.api.response.FirewallResponse; import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.GlobalLoadBalancerResponse; @@ -139,6 +146,7 @@ import org.apache.cloudstack.api.response.VpnUsersResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupOffering; +import org.apache.cloudstack.backup.BackupRepository; import org.apache.cloudstack.backup.BackupSchedule; import org.apache.cloudstack.config.Configuration; import org.apache.cloudstack.config.ConfigurationGroup; @@ -151,6 +159,7 @@ import org.apache.cloudstack.region.PortableIp; import org.apache.cloudstack.region.PortableIpRange; import org.apache.cloudstack.region.Region; import org.apache.cloudstack.secstorage.heuristics.Heuristic; +import org.apache.cloudstack.storage.sharedfs.SharedFS; import org.apache.cloudstack.storage.object.ObjectStore; import org.apache.cloudstack.usage.Usage; @@ -345,9 +354,11 @@ public interface ResponseGenerator { SecurityGroupResponse createSecurityGroupResponse(SecurityGroup group); - ExtractResponse createExtractResponse(Long uploadId, Long id, Long zoneId, Long accountId, String mode, String url); + ExtractResponse createImageExtractResponse(Long id, Long zoneId, Long accountId, String mode, String url); - ExtractResponse createExtractResponse(Long id, Long zoneId, Long accountId, String mode, String url); + ExtractResponse createVolumeExtractResponse(Long id, Long zoneId, Long accountId, String mode, String url); + + ExtractResponse createSnapshotExtractResponse(Long id, Long zoneId, Long accountId, String url); String toSerializedString(CreateCmdResponse response, String responseType); @@ -549,4 +560,12 @@ public interface ResponseGenerator { 
ObjectStoreResponse createObjectStoreResponse(ObjectStore os); BucketResponse createBucketResponse(Bucket bucket); + + ASNRangeResponse createASNumberRangeResponse(ASNumberRange asnRange); + + ASNumberResponse createASNumberResponse(ASNumber asn); + + BackupRepositoryResponse createBackupRepositoryResponse(BackupRepository repository); + + SharedFSResponse createSharedFSResponse(ResponseView view, SharedFS sharedFS); } diff --git a/api/src/main/java/org/apache/cloudstack/api/auth/APIAuthenticationType.java b/api/src/main/java/org/apache/cloudstack/api/auth/APIAuthenticationType.java index 5ba9d182daa..1f78708f7e5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/auth/APIAuthenticationType.java +++ b/api/src/main/java/org/apache/cloudstack/api/auth/APIAuthenticationType.java @@ -17,5 +17,5 @@ package org.apache.cloudstack.api.auth; public enum APIAuthenticationType { - LOGIN_API, LOGOUT_API, READONLY_API, LOGIN_2FA_API + LOGIN_API, LOGOUT_API, READONLY_API, LOGIN_2FA_API, PASSWORD_RESET } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java index 36e22acff91..a90fc4aebe9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java @@ -89,12 +89,11 @@ public class DeleteAccountCmd extends BaseAsyncCmd { CallContext.current().setEventDetails("Account ID: " + (account != null ? 
account.getUuid() : getId())); // Account not found is already handled by service boolean result = _regionService.deleteUserAccount(this); - if (result) { - SuccessResponse response = new SuccessResponse(getCommandName()); - setResponseObject(response); - } else { + if (!result) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete user account and all corresponding users"); } + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/DisableRoleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/DisableRoleCmd.java new file mode 100644 index 00000000000..80cb92c8362 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/DisableRoleCmd.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.acl; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import org.apache.cloudstack.acl.Role; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.RoleResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "disableRole", description = "Disables a role", responseObject = SuccessResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin}) +public class DisableRoleCmd extends BaseCmd { + + @Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID, required = true, entityType = RoleResponse.class, + description = "ID of the role", validations = {ApiArgValidator.PositiveNumber}) + private Long roleId; + + public Long getRoleId() { + return roleId; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + Role role = roleService.findRole(getRoleId()); + if (role == null) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Cannot find the role with provided id"); + } + CallContext.current().setEventDetails("Role id: " + role.getId()); + boolean result = 
roleService.disableRole(role); + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setSuccess(result); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/EnableRoleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/EnableRoleCmd.java new file mode 100644 index 00000000000..c4a6505d52f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/EnableRoleCmd.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.acl; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import org.apache.cloudstack.acl.Role; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.RoleResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "enableRole", description = "Enables a role", responseObject = SuccessResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin}) +public class EnableRoleCmd extends BaseCmd { + + @Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID, required = true, entityType = RoleResponse.class, + description = "ID of the role", validations = {ApiArgValidator.PositiveNumber}) + private Long roleId; + + public Long getRoleId() { + return roleId; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + Role role = roleService.findRole(getRoleId()); + if (role == null) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Cannot find the role with provided id"); + } + CallContext.current().setEventDetails("Role id: " + role.getId()); + boolean result = 
roleService.enableRole(role); + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setSuccess(result); + setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/ListRolesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/ListRolesCmd.java index fef2b27eaa5..d82cc852e4f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/ListRolesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/ListRolesCmd.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import com.cloud.exception.InvalidParameterValueException; import org.apache.cloudstack.acl.Role; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -51,6 +52,9 @@ public class ListRolesCmd extends BaseListCmd { @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "List role by role type, valid options are: Admin, ResourceAdmin, DomainAdmin, User.") private String roleType; + @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "List role by role type status, valid options are: enabled, disabled") + private String roleState; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -70,6 +74,17 @@ public class ListRolesCmd extends BaseListCmd { return null; } + public Role.State getRoleState() { + if (roleState == null) { + return null; + } + try { + return Role.State.valueOf(roleState.toUpperCase()); + } catch (IllegalArgumentException e) { + throw new InvalidParameterValueException("Unrecognized role state value"); + } + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// 
///////////////////////////////////////////////////// @@ -93,6 +108,7 @@ public class ListRolesCmd extends BaseListCmd { roleResponse.setDescription(role.getDescription()); roleResponse.setIsDefault(role.isDefault()); roleResponse.setPublicRole(role.isPublicRole()); + roleResponse.setState(role.getState().toString()); roleResponse.setObjectName("role"); roleResponses.add(roleResponse); } @@ -104,14 +120,16 @@ public class ListRolesCmd extends BaseListCmd { @Override public void execute() { Pair, Integer> roles; + Role.State state = getRoleState(); + String roleStateStr = state != null ? state.toString() : null; if (getId() != null && getId() > 0L) { roles = new Pair<>(Collections.singletonList(roleService.findRole(getId(), true)), 1); } else if (StringUtils.isNotBlank(getName()) || StringUtils.isNotBlank(getKeyword())) { - roles = roleService.findRolesByName(getName(), getKeyword(), getStartIndex(), getPageSizeVal()); + roles = roleService.findRolesByName(getName(), getKeyword(), roleStateStr, getStartIndex(), getPageSizeVal()); } else if (getRoleType() != null) { - roles = roleService.findRolesByType(getRoleType(), getStartIndex(), getPageSizeVal()); + roles = roleService.findRolesByType(getRoleType(), roleStateStr, getStartIndex(), getPageSizeVal()); } else { - roles = roleService.listRoles(getStartIndex(), getPageSizeVal()); + roles = roleService.listRoles(roleStateStr, getStartIndex(), getPageSizeVal()); } setupResponse(roles); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/RoleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/RoleCmd.java index 4c317d06b13..b3d816adc3f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/RoleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/acl/RoleCmd.java @@ -59,6 +59,7 @@ public abstract class RoleCmd extends BaseCmd { response.setRoleType(role.getRoleType()); response.setDescription(role.getDescription()); 
response.setPublicRole(role.isPublicRole()); + response.setState(role.getState().toString()); response.setResponseName(getCommandName()); response.setObjectName("role"); setResponseObject(response); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/CreateASNRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/CreateASNRangeCmd.java new file mode 100644 index 00000000000..beacba850c3 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/CreateASNRangeCmd.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.bgp; + +import com.cloud.bgp.ASNumberRange; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ASNRangeResponse; +import org.apache.cloudstack.api.response.ZoneResponse; + +@APICommand(name = "createASNRange", + description = "Creates a range of Autonomous Systems for BGP Dynamic Routing", + responseObject = ASNRangeResponse.class, + entityType = {ASNumberRange.class}, + since = "4.20.0", + authorized = {RoleType.Admin}) +public class CreateASNRangeCmd extends BaseCmd { + + @Parameter(name = ApiConstants.ZONE_ID, type = BaseCmd.CommandType.UUID, entityType = ZoneResponse.class, + description = "the zone ID", required = true) + private Long zoneId; + + @Parameter(name = ApiConstants.START_ASN, type = CommandType.LONG, required=true, description = "the start AS Number") + private Long startASNumber; + + @Parameter(name = ApiConstants.END_ASN, type = CommandType.LONG, required=true, description = "the end AS Number") + private Long endASNumber; + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + try { + ASNumberRange asnRange = bgpService.createASNumberRange(zoneId, startASNumber, endASNumber); + ASNRangeResponse response = 
_responseGenerator.createASNumberRangeResponse(asnRange); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (Exception e) { + String msg = String.format("Cannot create AS Number Range %s-%s for zone %s: %s", startASNumber, endASNumber, zoneId, e.getMessage()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, msg); + } + } + + public Long getZoneId() { + return zoneId; + } + + public Long getStartASNumber() { + return startASNumber; + } + + public Long getEndASNumber() { + return endASNumber; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/DeleteASNRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/DeleteASNRangeCmd.java new file mode 100644 index 00000000000..33e139315bf --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/DeleteASNRangeCmd.java @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.bgp; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ASNRangeResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "deleteASNRange", + description = "deletes a range of Autonomous Systems for BGP Dynamic Routing", + responseObject = SuccessResponse.class, + since = "4.20.0", + authorized = {RoleType.Admin}) +public class DeleteASNRangeCmd extends BaseCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + //////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = ASNRangeResponse.class, + required = true, + description = "ID of the AS range") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, 
ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + if (bgpService.deleteASRange(getId())) { + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to remove AS range: " + getId()); + } + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/ListASNRangesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/ListASNRangesCmd.java new file mode 100644 index 00000000000..82e54581102 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/ListASNRangesCmd.java @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.bgp; + +import com.cloud.bgp.ASNumberRange; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ASNRangeResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; + +import java.util.ArrayList; +import java.util.List; + +@APICommand(name = "listASNRanges", + description = "List Autonomous Systems Number Ranges", + responseObject = ASNRangeResponse.class, + entityType = {ASNumberRange.class}, + since = "4.20.0", + authorized = {RoleType.Admin}) +public class ListASNRangesCmd extends BaseListCmd { + + @Parameter(name = ApiConstants.ZONE_ID, type = BaseCmd.CommandType.UUID, entityType = ZoneResponse.class, + description = "the zone ID") + private Long zoneId; + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + try { + List ranges = bgpService.listASNumberRanges(zoneId); + ListResponse response = new ListResponse<>(); + List responses = new ArrayList<>(); + for (ASNumberRange asnRange : ranges) { + responses.add(_responseGenerator.createASNumberRangeResponse(asnRange)); + } + response.setResponses(responses); + 
response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (Exception e) { + String msg = String.format("Error listing AS Number Ranges: %s", e.getMessage()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, msg); + } + } + + public Long getZoneId() { + return zoneId; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/ReleaseASNumberCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/ReleaseASNumberCmd.java new file mode 100644 index 00000000000..687f60dc6da --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/bgp/ReleaseASNumberCmd.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.bgp; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.utils.Pair; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.api.response.ZoneResponse; + +@APICommand(name = "releaseASNumber", + description = "Releases an AS Number back to the pool", + since = "4.20.0", + authorized = {RoleType.Admin}, + responseObject = SuccessResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false) +public class ReleaseASNumberCmd extends BaseCmd { + + @Parameter(name = ApiConstants.ZONE_ID, type = BaseCmd.CommandType.UUID, entityType = ZoneResponse.class, + description = "the zone ID", required = true) + private Long zoneId; + + @Parameter(name= ApiConstants.AS_NUMBER, type=CommandType.LONG, description="the AS Number to be released", + required = true) + private Long asNumber; + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + try { + Pair resultPair = bgpService.releaseASNumber(zoneId, asNumber, false); + Boolean result = resultPair.first(); + if (!result) { + String details = resultPair.second(); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Cannot release AS Number 
%s: %s", asNumber, details)); + } + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setDisplayText(String.format("AS Number %s is released successfully", asNumber)); + setResponseObject(response); + } catch (Exception e) { + String msg = String.format("Error releasing AS Number %s: %s", asNumber, e.getMessage()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, msg); + } + } + + public Long getZoneId() { + return zoneId; + } + + public Long getAsNumber() { + return asNumber; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java index 184a443d9db..69cb43ce40e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.api.command.admin.cluster; import java.util.ArrayList; import java.util.List; +import com.cloud.cpu.CPU; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.APICommand; @@ -67,6 +68,11 @@ public class AddClusterCmd extends BaseCmd { description = "hypervisor type of the cluster: XenServer,KVM,VMware,Hyperv,BareMetal,Simulator,Ovm3") private String hypervisor; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, + description = "the CPU arch of the cluster. 
Valid options are: x86_64, aarch64", + since = "4.20") + private String arch; + @Parameter(name = ApiConstants.CLUSTER_TYPE, type = CommandType.STRING, required = true, description = "type of the cluster: CloudManaged, ExternalManaged") private String clusterType; @@ -204,6 +210,10 @@ public class AddClusterCmd extends BaseCmd { return ApiCommandResourceType.Cluster; } + public CPU.CPUArch getArch() { + return CPU.CPUArch.fromType(arch); + } + @Override public void execute() { try { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java index 77bb97fd39d..c4ee87380ed 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java @@ -16,6 +16,7 @@ // under the License. package org.apache.cloudstack.api.command.admin.cluster; +import com.cloud.cpu.CPU; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.APICommand; @@ -29,6 +30,7 @@ import org.apache.cloudstack.api.response.ClusterResponse; import com.cloud.exception.InvalidParameterValueException; import com.cloud.org.Cluster; import com.cloud.user.Account; +import org.apache.commons.lang3.StringUtils; @APICommand(name = "updateCluster", description = "Updates an existing cluster", responseObject = ClusterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) @@ -53,6 +55,11 @@ public class UpdateClusterCmd extends BaseCmd { @Parameter(name = ApiConstants.MANAGED_STATE, type = CommandType.STRING, description = "whether this cluster is managed by cloudstack") private String managedState; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, + description = "the CPU arch of the cluster. 
Valid options are: x86_64, aarch64", + since = "4.20") + private String arch; + public String getClusterName() { return clusterName; } @@ -108,6 +115,13 @@ public class UpdateClusterCmd extends BaseCmd { return ApiCommandResourceType.Cluster; } + public CPU.CPUArch getArch() { + if (StringUtils.isBlank(arch)) { + return null; + } + return CPU.CPUArch.fromType(arch); + } + @Override public void execute() { Cluster cluster = _resourceService.getCluster(getId()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java index 50984188bf5..01f7af10841 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java @@ -43,6 +43,12 @@ public class UpdateHypervisorCapabilitiesCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = HypervisorCapabilitiesResponse.class, description = "ID of the hypervisor capability") private Long id; + @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "the hypervisor for which the hypervisor capabilities are to be updated", since = "4.19.1") + private String hypervisor; + + @Parameter(name = ApiConstants.HYPERVISOR_VERSION, type = CommandType.STRING, description = "the hypervisor version for which the hypervisor capabilities are to be updated", since = "4.19.1") + private String hypervisorVersion; + @Parameter(name = ApiConstants.SECURITY_GROUP_EANBLED, type = CommandType.BOOLEAN, description = "set true to enable security group for this hypervisor.") private Boolean securityGroupEnabled; @@ -73,6 +79,14 @@ public class UpdateHypervisorCapabilitiesCmd extends BaseCmd { return id; } + public String getHypervisor() { + return hypervisor; + } + + public 
String getHypervisorVersion() { + return hypervisorVersion; + } + public Long getMaxGuestsLimit() { return maxGuestsLimit; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForGuestNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForGuestNetworkCmd.java new file mode 100644 index 00000000000..a482cb1d4f2 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForGuestNetworkCmd.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.Ipv4SubnetForGuestNetworkResponse; + +import com.cloud.event.EventTypes; +import com.cloud.user.Account; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; + +@APICommand(name = "createIpv4SubnetForGuestNetwork", + description = "Creates a IPv4 subnet for guest networks.", + responseObject = Ipv4SubnetForGuestNetworkResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class CreateIpv4SubnetForGuestNetworkCmd extends BaseAsyncCmd { + + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.PARENT_ID, + type = CommandType.UUID, + entityType = DataCenterIpv4SubnetResponse.class, + required = true, + description = "The zone Ipv4 subnet which the IPv4 subnet belongs to.") + private Long parentId; + + @Parameter(name = ApiConstants.SUBNET, + type = CommandType.STRING, + description = "The CIDR of this Ipv4 subnet.") + private String subnet; + + @Parameter(name = ApiConstants.CIDR_SIZE, + type = CommandType.INTEGER, + description = "the CIDR size of IPv4 network. 
This is mutually exclusive with subnet.") + private Integer cidrSize; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public Long getParentId() { + return parentId; + } + + public String getSubnet() { + return subnet; + } + + public Integer getCidrSize() { + return cidrSize; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_IP4_GUEST_SUBNET_CREATE; + } + + @Override + public String getEventDescription() { + return "Creating guest IPv4 subnet " + getSubnet() + " in zone subnet=" + getParentId(); + } + + @Override + public void execute() { + Ipv4GuestSubnetNetworkMap result = routedIpv4Manager.createIpv4SubnetForGuestNetwork(this); + if (result != null) { + Ipv4SubnetForGuestNetworkResponse response = routedIpv4Manager.createIpv4SubnetForGuestNetworkResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create zone guest IPv4 subnet."); + } + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForZoneCmd.java new file mode 100644 index 00000000000..5f48cf9c632 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForZoneCmd.java @@ -0,0 +1,125 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; + +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "createIpv4SubnetForZone", + description = "Creates a IPv4 subnet for a zone.", + responseObject = DataCenterIpv4SubnetResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class CreateIpv4SubnetForZoneCmd extends BaseAsyncCmd { + + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ZONE_ID, + type = 
CommandType.UUID, + entityType = ZoneResponse.class, + required = true, + description = "UUID of the zone which the IPv4 subnet belongs to.", + validations = {ApiArgValidator.PositiveNumber}) + private Long zoneId; + + @Parameter(name = ApiConstants.SUBNET, + type = CommandType.STRING, + required = true, + description = "The CIDR of the IPv4 subnet.") + private String subnet; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "account who will own the IPv4 subnet") + private String accountName; + + @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "project who will own the IPv4 subnet") + private Long projectId; + + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "domain ID of the account owning the IPv4 subnet") + private Long domainId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public Long getZoneId() { + return zoneId; + } + + public String getSubnet() { + return subnet; + } + + public String getAccountName() { + return accountName; + } + + public Long getProjectId() { + return projectId; + } + + public Long getDomainId() { + return domainId; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_ZONE_IP4_SUBNET_CREATE; + } + + @Override + public String getEventDescription() { + return "Creating guest IPv4 subnet " + getSubnet() + " for zone=" + getZoneId(); + } + + @Override + public void execute() { + DataCenterIpv4GuestSubnet result = routedIpv4Manager.createDataCenterIpv4GuestSubnet(this); + if (result != null) { + DataCenterIpv4SubnetResponse response = routedIpv4Manager.createDataCenterIpv4SubnetResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new 
ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create zone guest IPv4 subnet."); + } + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkCmdByAdmin.java index cd9770877ed..d8b57f79528 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkCmdByAdmin.java @@ -24,10 +24,13 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.command.admin.AdminCmd; import org.apache.cloudstack.api.command.user.network.CreateNetworkCmd; +import org.apache.cloudstack.api.response.BgpPeerResponse; import org.apache.cloudstack.api.response.NetworkResponse; import com.cloud.network.Network; +import java.util.List; + @APICommand(name = "createNetwork", description = "Creates a network", responseObject = NetworkResponse.class, responseView = ResponseView.Full, entityType = {Network.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateNetworkCmdByAdmin extends CreateNetworkCmd implements AdminCmd { @@ -49,6 +52,14 @@ public class CreateNetworkCmdByAdmin extends CreateNetworkCmd implements AdminCm validations = {ApiArgValidator.NotNullOrEmpty}) private String routerIpv6; + @Parameter(name = ApiConstants.BGP_PEER_IDS, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType = BgpPeerResponse.class, + description = "Ids of the Bgp Peer for the network", + since = "4.20.0") + private List bgpPeerIds; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -78,4 
+89,8 @@ public class CreateNetworkCmdByAdmin extends CreateNetworkCmd implements AdminCm public String getRouterIpv6() { return routerIpv6; } + + public List getBgpPeerIds() { + return bgpPeerIds; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java index 9117bcfc193..af3db374a7c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java @@ -146,12 +146,6 @@ public class CreateNetworkOfferingCmd extends BaseCmd { since = "4.20.0") private Boolean forNsx; - @Parameter(name = ApiConstants.NSX_MODE, - type = CommandType.STRING, - description = "Indicates the mode with which the network will operate. Valid option: NATTED or ROUTED", - since = "4.20.0") - private String nsxMode; - @Parameter(name = ApiConstants.NSX_SUPPORT_LB, type = CommandType.BOOLEAN, description = "true if network offering for NSX network offering supports Load balancer service.", @@ -164,6 +158,12 @@ public class CreateNetworkOfferingCmd extends BaseCmd { since = "4.20.0") private Boolean nsxSupportsInternalLbService; + @Parameter(name = ApiConstants.NETWORK_MODE, + type = CommandType.STRING, + description = "Indicates the mode with which the network will operate. 
Valid option: NATTED or ROUTED", + since = "4.20.0") + private String networkMode; + @Parameter(name = ApiConstants.FOR_TUNGSTEN, type = CommandType.BOOLEAN, description = "true if network offering is meant to be used for Tungsten-Fabric, false otherwise.") @@ -211,6 +211,16 @@ public class CreateNetworkOfferingCmd extends BaseCmd { since = "4.16") private Boolean enable; + @Parameter(name = ApiConstants.SPECIFY_AS_NUMBER, type = CommandType.BOOLEAN, since = "4.20.0", + description = "true if network offering supports choosing AS number") + private Boolean specifyAsNumber; + + @Parameter(name = ApiConstants.ROUTING_MODE, + type = CommandType.STRING, + since = "4.20.0", + description = "the routing mode for the network offering. Supported types are: Static or Dynamic.") + private String routingMode; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -302,8 +312,8 @@ public class CreateNetworkOfferingCmd extends BaseCmd { return BooleanUtils.isTrue(forNsx); } - public String getNsxMode() { - return nsxMode; + public String getNetworkMode() { + return networkMode; } public boolean getNsxSupportsLbService() { @@ -462,6 +472,14 @@ public class CreateNetworkOfferingCmd extends BaseCmd { return false; } + public boolean getSpecifyAsNumber() { + return BooleanUtils.toBoolean(specifyAsNumber); + } + + public String getRoutingMode() { + return routingMode; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DedicateIpv4SubnetForZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DedicateIpv4SubnetForZoneCmd.java new file mode 100644 index 00000000000..2df032c559c --- /dev/null +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DedicateIpv4SubnetForZoneCmd.java @@ -0,0 +1,111 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "dedicateIpv4SubnetForZone", + description = "Dedicates an existing IPv4 subnet for a zone to an account or a domain.", + responseObject = DataCenterIpv4SubnetResponse.class, + since = "4.20.0", + 
requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class DedicateIpv4SubnetForZoneCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = DataCenterIpv4SubnetResponse.class, required = true, description = "Id of the guest network IPv4 subnet") + private Long id; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "account who will own the IPv4 subnet") + private String accountName; + + @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "project who will own the IPv4 subnet") + private Long projectId; + + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "domain ID of the account owning the IPv4 subnet") + private Long domainId; + + public Long getId() { + return id; + } + + public String getAccountName() { + return accountName; + } + + public Long getProjectId() { + return projectId; + } + + public Long getDomainId() { + return domainId; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_ZONE_IP4_SUBNET_DEDICATE; + } + + @Override + public String getEventDescription() { + return "Dedicating zone IPv4 subnet " + getId(); + } + + @Override + public void execute() { + try { + DataCenterIpv4GuestSubnet result = routedIpv4Manager.dedicateDataCenterIpv4GuestSubnet(this); + if (result != null) { + DataCenterIpv4SubnetResponse response = routedIpv4Manager.createDataCenterIpv4SubnetResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to dedicate guest network IPv4 subnet:" + getId()); + } + 
} catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForGuestNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForGuestNetworkCmd.java new file mode 100644 index 00000000000..28a646f9d03 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForGuestNetworkCmd.java @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.Ipv4SubnetForGuestNetworkResponse; +import org.apache.cloudstack.api.response.SuccessResponse; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "deleteIpv4SubnetForGuestNetwork", + description = "Deletes an existing IPv4 subnet for guest network.", + responseObject = SuccessResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class DeleteIpv4SubnetForGuestNetworkCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = Ipv4SubnetForGuestNetworkResponse.class, required = true, description = "Id of the guest network IPv4 subnet") + private Long id; + + public Long getId() { + return id; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_IP4_GUEST_SUBNET_DELETE; + } + + @Override + public String getEventDescription() { + return "Deleting guest IPv4 subnet " + getId(); + } + + @Override + public void execute() { + try { + boolean result = routedIpv4Manager.deleteIpv4SubnetForGuestNetwork(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setResponseName(getCommandName()); + 
this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete guest network IPv4 subnet:" + getId()); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForZoneCmd.java new file mode 100644 index 00000000000..222bc1bad98 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForZoneCmd.java @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.SuccessResponse; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "deleteIpv4SubnetForZone", + description = "Deletes an existing IPv4 subnet for a zone.", + responseObject = SuccessResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class DeleteIpv4SubnetForZoneCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = DataCenterIpv4SubnetResponse.class, required = true, description = "Id of the guest network IPv4 subnet") + private Long id; + + public Long getId() { + return id; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_ZONE_IP4_SUBNET_DELETE; + } + + @Override + public String getEventDescription() { + return "Deleting zone IPv4 subnet " + getId(); + } + + @Override + public void execute() { + try { + boolean result = routedIpv4Manager.deleteDataCenterIpv4GuestSubnet(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new 
ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete guest network IPv4 subnet:" + getId()); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForGuestNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForGuestNetworkCmd.java new file mode 100644 index 00000000000..9761f6e89eb --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForGuestNetworkCmd.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.network; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.Ipv4SubnetForGuestNetworkResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.VpcResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; + +@APICommand(name = "listIpv4SubnetsForGuestNetwork", + description = "Lists IPv4 subnets for guest networks.", + responseObject = Ipv4SubnetForGuestNetworkResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ListIpv4SubnetsForGuestNetworkCmd extends BaseListCmd { + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = Ipv4SubnetForGuestNetworkResponse.class, + description = "UUID of the IPv4 subnet for guest network.") + private Long id; + + @Parameter(name = ApiConstants.PARENT_ID, + type = CommandType.UUID, + entityType = DataCenterIpv4SubnetResponse.class, + description = "UUID of zone Ipv4 subnet which the IPv4 subnet belongs to.") + private Long parentId; + + @Parameter(name = ApiConstants.SUBNET, + type = CommandType.STRING, + description = "The CIDR of the Ipv4 subnet.") + private String subnet; + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + description = "UUID of zone to which the IPv4 subnet belongs to.") + private Long zoneId; + + @Parameter(name = ApiConstants.NETWORK_ID, + type = 
CommandType.UUID, + entityType = NetworkResponse.class, + description = "UUID of network to which the IPv4 subnet is associated to.") + private Long networkId; + + @Parameter(name = ApiConstants.VPC_ID, + type = CommandType.UUID, + entityType = VpcResponse.class, + description = "UUID of VPC to which the IPv4 subnet is associated to.") + private Long vpcId; + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getParentId() { + return parentId; + } + + public Long getZoneId() { + return zoneId; + } + + public String getSubnet() { + return subnet; + } + + public Long getNetworkId() { + return networkId; + } + + public Long getVpcId() { + return vpcId; + } + + @Override + public void execute() { + List subnets = routedIpv4Manager.listIpv4GuestSubnetsForGuestNetwork(this); + ListResponse response = new ListResponse<>(); + List subnetResponses = new ArrayList<>(); + for (Ipv4GuestSubnetNetworkMap subnet : subnets) { + Ipv4SubnetForGuestNetworkResponse subnetResponse = routedIpv4Manager.createIpv4SubnetForGuestNetworkResponse(subnet); + subnetResponses.add(subnetResponse); + } + + response.setResponses(subnetResponses, subnets.size()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } + +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForZoneCmd.java new file mode 100644 index 00000000000..2c2182250ed --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForZoneCmd.java @@ -0,0 +1,120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.network; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; + +@APICommand(name = "listIpv4SubnetsForZone", + description = "Lists IPv4 subnets for zone.", + responseObject = DataCenterIpv4SubnetResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ListIpv4SubnetsForZoneCmd extends BaseListCmd { + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = DataCenterIpv4SubnetResponse.class, + description = "UUID of the IPv4 subnet.") + private Long id; + + @Parameter(name = ApiConstants.ZONE_ID, + type = 
CommandType.UUID, + entityType = ZoneResponse.class, + description = "UUID of zone to which the IPv4 subnet belongs to.") + private Long zoneId; + + @Parameter(name = ApiConstants.SUBNET, + type = CommandType.STRING, + description = "CIDR of the IPv4 subnet.") + private String subnet; + + @Parameter(name = ApiConstants.ACCOUNT, + type = CommandType.STRING, + description = "the account which the IPv4 subnet is dedicated to. Must be used with the domainId parameter.") + private String accountName; + + @Parameter(name = ApiConstants.PROJECT_ID, + type = CommandType.UUID, + entityType = ProjectResponse.class, + description = "project who which the IPv4 subnet is dedicated to") + private Long projectId; + + @Parameter(name = ApiConstants.DOMAIN_ID, + type = CommandType.UUID, + entityType = DomainResponse.class, + description = "the domain ID which the IPv4 subnet is dedicated to.") + private Long domainId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getZoneId() { + return zoneId; + } + + public String getSubnet() { + return subnet; + } + + public String getAccountName() { + return accountName; + } + + public Long getProjectId() { + return projectId; + } + + public Long getDomainId() { + return domainId; + } + + @Override + public void execute() { + List subnets = routedIpv4Manager.listDataCenterIpv4GuestSubnets(this); + ListResponse response = new ListResponse<>(); + List subnetResponses = new ArrayList<>(); + for (DataCenterIpv4GuestSubnet subnet : subnets) { + DataCenterIpv4SubnetResponse subnetResponse = routedIpv4Manager.createDataCenterIpv4SubnetResponse(subnet); + subnetResponses.add(subnetResponse); + } + + response.setResponses(subnetResponses, subnets.size()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } + +} diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedIpv4SubnetForZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedIpv4SubnetForZoneCmd.java new file mode 100644 index 00000000000..3e151b9b58f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedIpv4SubnetForZoneCmd.java @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "releaseIpv4SubnetForZone", + description = "Releases an existing dedicated IPv4 subnet for a zone.", + responseObject = DataCenterIpv4SubnetResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ReleaseDedicatedIpv4SubnetForZoneCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = DataCenterIpv4SubnetResponse.class, required = true, description = "Id of the guest network IPv4 subnet") + private Long id; + + public Long getId() { + return id; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_ZONE_IP4_SUBNET_RELEASE; + } + + @Override + public String getEventDescription() { + return "Releasing a dedicated zone IPv4 subnet " + getId(); + } + + @Override + public void execute() { + try { + DataCenterIpv4GuestSubnet result = routedIpv4Manager.releaseDedicatedDataCenterIpv4GuestSubnet(this); + if (result != null) { + DataCenterIpv4SubnetResponse response = 
routedIpv4Manager.createDataCenterIpv4SubnetResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release guest network IPv4 subnet:" + getId()); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateIpv4SubnetForZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateIpv4SubnetForZoneCmd.java new file mode 100644 index 00000000000..da7a23f50d9 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateIpv4SubnetForZoneCmd.java @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "updateIpv4SubnetForZone", + description = "Updates an existing IPv4 subnet for a zone.", + responseObject = DataCenterIpv4SubnetResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class UpdateIpv4SubnetForZoneCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = DataCenterIpv4SubnetResponse.class, required = true, description = "Id of the guest network IPv4 subnet") + private Long id; + + @Parameter(name = ApiConstants.SUBNET, + type = CommandType.STRING, + required = true, + description = "The new CIDR of the IPv4 subnet.") + private String subnet; + + public Long getId() { + return id; + } + + public String getSubnet() { + return subnet; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_ZONE_IP4_SUBNET_UPDATE; + } + + @Override + public String getEventDescription() { + return "Updating zone IPv4 subnet " + getId(); + } + + @Override + public void execute() { + try { + DataCenterIpv4GuestSubnet 
result = routedIpv4Manager.updateDataCenterIpv4GuestSubnet(this); + if (result != null) { + DataCenterIpv4SubnetResponse response = routedIpv4Manager.createDataCenterIpv4SubnetResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update guest network IPv4 subnet:" + getId()); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForNetworkCmd.java new file mode 100644 index 00000000000..1d6bffca342 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForNetworkCmd.java @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.NetworkResponse; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.Network; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +import java.util.List; + +@APICommand(name = "changeBgpPeersForNetwork", + description = "Change the BGP peers for a network.", + responseObject = BgpPeerResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ChangeBgpPeersForNetworkCmd extends BaseAsyncCmd implements AdminCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.NETWORK_ID, + type = CommandType.UUID, + entityType = NetworkResponse.class, + required = true, + description = "UUID of the network which the Bgp Peers are associated to.", + validations = {ApiArgValidator.PositiveNumber}) + private Long networkId; + + @Parameter(name = ApiConstants.BGP_PEER_IDS, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType = BgpPeerResponse.class, + description = "Ids of the Bgp Peer. 
If it is empty, all BGP peers will be unlinked.") + private List bgpPeerIds; + + public Long getNetworkId() { + return networkId; + } + + public List getBgpPeerIds() { + return bgpPeerIds; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_NETWORK_BGP_PEER_UPDATE; + } + + @Override + public String getEventDescription() { + return "Changing Bgp Peers for network " + getNetworkId(); + } + + @Override + public void execute() { + try { + Network result = routedIpv4Manager.changeBgpPeersForNetwork(this); + if (result != null) { + NetworkResponse response = _responseGenerator.createNetworkResponse(getResponseView(), result); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to change BGP Peers for network"); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForVpcCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForVpcCmd.java new file mode 100644 index 00000000000..0c89f3f1d43 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForVpcCmd.java @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network.bgp; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.admin.AdminCmd; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.VpcResponse; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.vpc.Vpc; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +import java.util.List; + +@APICommand(name = "changeBgpPeersForVpc", + description = "Change the BGP peers for a VPC.", + responseObject = BgpPeerResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ChangeBgpPeersForVpcCmd extends BaseAsyncCmd implements AdminCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.VPC_ID, + type = CommandType.UUID, + entityType = VpcResponse.class, + required = true, + description = "UUID of the VPC which the Bgp Peers are 
associated to.", + validations = {ApiArgValidator.PositiveNumber}) + private Long vpcId; + + @Parameter(name = ApiConstants.BGP_PEER_IDS, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType = BgpPeerResponse.class, + description = "Ids of the Bgp Peer. If it is empty, all BGP peers will be unlinked.") + private List bgpPeerIds; + + public Long getVpcId() { + return vpcId; + } + + public List getBgpPeerIds() { + return bgpPeerIds; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_VPC_BGP_PEER_UPDATE; + } + + @Override + public String getEventDescription() { + return "Changing Bgp Peers for VPC " + getVpcId(); + } + + @Override + public void execute() { + try { + Vpc result = routedIpv4Manager.changeBgpPeersForVpc(this); + if (result != null) { + VpcResponse response = _responseGenerator.createVpcResponse(getResponseView(), result); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to change BGP Peers for vpc"); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/CreateBgpPeerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/CreateBgpPeerCmd.java new file mode 100644 index 00000000000..80642124938 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/CreateBgpPeerCmd.java @@ -0,0 +1,168 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network.bgp; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiArgValidator; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.commons.collections.MapUtils; + +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +import java.util.Collection; +import java.util.Map; + +@APICommand(name = "createBgpPeer", + description = "Creates a Bgp Peer for a zone.", + responseObject = BgpPeerResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = true, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class CreateBgpPeerCmd extends BaseAsyncCmd { + + + ///////////////////////////////////////////////////// + //////////////// 
API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + required = true, + description = "UUID of the zone which the Bgp Peer belongs to.", + validations = {ApiArgValidator.PositiveNumber}) + private Long zoneId; + + @Parameter(name = ApiConstants.IP_ADDRESS, + type = CommandType.STRING, + description = "The IPv4 address of the Bgp Peer.") + private String ip4Address; + + @Parameter(name = ApiConstants.IP6_ADDRESS, + type = CommandType.STRING, + description = "The IPv6 address of the Bgp Peer.") + private String ip6Address; + + @Parameter(name = ApiConstants.AS_NUMBER, + type = CommandType.LONG, + required = true, + description = "The AS number of the Bgp Peer.") + private Long asNumber; + + @Parameter(name = ApiConstants.PASSWORD, + type = CommandType.STRING, + description = "The password of the Bgp Peer.") + private String password; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "account who will own the Bgp Peer") + private String accountName; + + @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "project who will own the Bgp Peer") + private Long projectId; + + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "domain ID of the account owning the Bgp Peer") + private Long domainId; + + @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, + description = "BGP peer details in key/value pairs.") + protected Map details; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public Long getZoneId() { + return zoneId; + } + + public String getIp4Address() { + return ip4Address; + } + + public String getIp6Address() { + return 
ip6Address; + } + + public String getPassword() { + return password; + } + + public Long getAsNumber() { + return asNumber; + } + + public String getAccountName() { + return accountName; + } + + public Long getProjectId() { + return projectId; + } + + public Long getDomainId() { + return domainId; + } + + public Map getDetails() { + if (MapUtils.isEmpty(details)) { + return null; + } + Collection paramsCollection = this.details.values(); + return (Map) (paramsCollection.toArray())[0]; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_BGP_PEER_CREATE; + } + + @Override + public String getEventDescription() { + return "Creating Bgp Peer " + getAsNumber() + " for zone=" + getZoneId(); + } + + @Override + public void execute() { + BgpPeer result = routedIpv4Manager.createBgpPeer(this); + if (result != null) { + BgpPeerResponse response = routedIpv4Manager.createBgpPeerResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create Bgp Peer."); + } + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/DedicateBgpPeerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/DedicateBgpPeerCmd.java new file mode 100644 index 00000000000..ec3d0ea1162 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/DedicateBgpPeerCmd.java @@ -0,0 +1,111 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network.bgp; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.network.BgpPeer; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "dedicateBgpPeer", + description = "Dedicates an existing Bgp Peer to an account or a domain.", + responseObject = BgpPeerResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class DedicateBgpPeerCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = BgpPeerResponse.class, required = true, description = "Id of the Bgp Peer") + private Long id; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, 
description = "account who will own the Bgp Peer") + private String accountName; + + @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "project who will own the Bgp Peer") + private Long projectId; + + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "domain ID of the account owning the Bgp Peer") + private Long domainId; + + public Long getId() { + return id; + } + + public String getAccountName() { + return accountName; + } + + public Long getProjectId() { + return projectId; + } + + public Long getDomainId() { + return domainId; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_BGP_PEER_DEDICATE; + } + + @Override + public String getEventDescription() { + return "Dedicating Bgp Peer " + getId(); + } + + @Override + public void execute() { + try { + BgpPeer result = routedIpv4Manager.dedicateBgpPeer(this); + if (result != null) { + BgpPeerResponse response = routedIpv4Manager.createBgpPeerResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to dedicate Bgp Peer:" + getId()); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/DeleteBgpPeerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/DeleteBgpPeerCmd.java new file mode 100644 index 00000000000..a01711efa44 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/DeleteBgpPeerCmd.java @@ -0,0 
+1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network.bgp; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.SuccessResponse; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "deleteBgpPeer", + description = "Deletes an existing Bgp Peer.", + responseObject = SuccessResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class DeleteBgpPeerCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + 
///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = BgpPeerResponse.class, required = true, description = "Id of the Bgp Peer") + private Long id; + + public Long getId() { + return id; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_BGP_PEER_DELETE; + } + + @Override + public String getEventDescription() { + return "Deleting Bgp Peer " + getId(); + } + + @Override + public void execute() { + try { + boolean result = routedIpv4Manager.deleteBgpPeer(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete Bgp Peer:" + getId()); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ListBgpPeersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ListBgpPeersCmd.java new file mode 100644 index 00000000000..ea15f0970e8 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ListBgpPeersCmd.java @@ -0,0 +1,130 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.network.bgp; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.network.BgpPeer; + +@APICommand(name = "listBgpPeers", + description = "Lists Bgp Peers.", + responseObject = BgpPeerResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ListBgpPeersCmd extends BaseListCmd { + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = BgpPeerResponse.class, + description = "UUID of the Bgp Peer.") + private Long id; + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + description = "UUID of zone to which the Bgp Peer belongs to.") + private Long zoneId; + + @Parameter(name = ApiConstants.AS_NUMBER, + type = CommandType.LONG, + description = "AS number of the Bgp Peer.") + private Long asNumber; + + @Parameter(name = ApiConstants.ACCOUNT, + type = CommandType.STRING, + description = 
"the account which the Bgp Peer is dedicated to. Must be used with the domainId parameter.") + private String accountName; + + @Parameter(name = ApiConstants.PROJECT_ID, + type = CommandType.UUID, + entityType = ProjectResponse.class, + description = "project who which the Bgp Peer is dedicated to") + private Long projectId; + + @Parameter(name = ApiConstants.DOMAIN_ID, + type = CommandType.UUID, + entityType = DomainResponse.class, + description = "the domain ID which the Bgp Peer is dedicated to.") + private Long domainId; + + @Parameter(name = ApiConstants.IS_DEDICATED, + type = CommandType.BOOLEAN, + description = "Lists only dedicated or non-dedicated Bgp Peers. If not set, lists all dedicated and non-dedicated BGP peers the domain/account can access.") + private Boolean isDedicated; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getZoneId() { + return zoneId; + } + + public Long getAsNumber() { + return asNumber; + } + + public String getAccountName() { + return accountName; + } + + public Long getProjectId() { + return projectId; + } + + public Long getDomainId() { + return domainId; + } + + public Boolean getDedicated() { + return isDedicated; + } + + @Override + public void execute() { + List subnets = routedIpv4Manager.listBgpPeers(this); + ListResponse response = new ListResponse<>(); + List subnetResponses = new ArrayList<>(); + for (BgpPeer subnet : subnets) { + BgpPeerResponse subnetResponse = routedIpv4Manager.createBgpPeerResponse(subnet); + subnetResponse.setObjectName("bgppeer"); + subnetResponses.add(subnetResponse); + } + + response.setResponses(subnetResponses, subnets.size()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } + +} diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ReleaseDedicatedBgpPeerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ReleaseDedicatedBgpPeerCmd.java new file mode 100644 index 00000000000..92610c233ef --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/ReleaseDedicatedBgpPeerCmd.java @@ -0,0 +1,88 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.network.BgpPeer; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "releaseBgpPeer", + description = "Releases an existing dedicated Bgp Peer.", + responseObject = BgpPeerResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class ReleaseDedicatedBgpPeerCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = BgpPeerResponse.class, required = true, description = "Id of the Bgp Peer") + private Long id; + + public Long getId() { + return id; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_BGP_PEER_RELEASE; + } + + @Override + public String getEventDescription() { + return "Releasing a dedicated Bgp Peer " + getId(); + } + + @Override + public void execute() { + try { + BgpPeer result = routedIpv4Manager.releaseDedicatedBgpPeer(this); + if (result != null) { + BgpPeerResponse response = routedIpv4Manager.createBgpPeerResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to 
release Bgp Peer:" + getId()); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/UpdateBgpPeerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/UpdateBgpPeerCmd.java new file mode 100644 index 00000000000..ae44330ea03 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/bgp/UpdateBgpPeerCmd.java @@ -0,0 +1,149 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.network.BgpPeer; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.commons.collections.MapUtils; + +import java.util.Collection; +import java.util.Map; + +@APICommand(name = "updateBgpPeer", + description = "Updates an existing Bgp Peer.", + responseObject = BgpPeerResponse.class, + since = "4.20.0", + requestHasSensitiveInfo = true, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}) +public class UpdateBgpPeerCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = BgpPeerResponse.class, required = true, description = "Id of the Bgp Peer") + private Long id; + + @Parameter(name = ApiConstants.IP_ADDRESS, + type = CommandType.STRING, + description = "The IPv4 address of the Bgp Peer.") + private String ip4Address; + + @Parameter(name = ApiConstants.IP6_ADDRESS, + type = CommandType.STRING, + description = "The IPv6 address of the Bgp Peer.") + private String ip6Address; + + @Parameter(name = ApiConstants.AS_NUMBER, + type = CommandType.LONG, + description = "The AS number of the Bgp Peer.") + private Long asNumber; + + @Parameter(name = ApiConstants.PASSWORD, + type = CommandType.STRING, + description 
= "The password of the Bgp Peer.") + private String password; + + @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, + description = "BGP peer details in key/value pairs.") + protected Map details; + + @Parameter(name = ApiConstants.CLEAN_UP_DETAILS, + type = CommandType.BOOLEAN, + description = "optional boolean field, which indicates if details should be cleaned up or not (if set to true, details are removed for this resource; if false or not set, no action)") + private Boolean cleanupDetails; + + public Long getId() { + return id; + } + + public String getIp4Address() { + return ip4Address; + } + + public String getIp6Address() { + return ip6Address; + } + + public Long getAsNumber() { + return asNumber; + } + + public String getPassword() { + return password; + } + + public Map getDetails() { + if (MapUtils.isEmpty(details)) { + return null; + } + Collection paramsCollection = this.details.values(); + return (Map) (paramsCollection.toArray())[0]; + } + + public boolean isCleanupDetails(){ + return cleanupDetails == null ? 
false : cleanupDetails.booleanValue(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_BGP_PEER_UPDATE; + } + + @Override + public String getEventDescription() { + return "Updating Bgp Peer " + getId(); + } + + @Override + public void execute() { + try { + BgpPeer result = routedIpv4Manager.updateBgpPeer(this); + if (result != null) { + BgpPeerResponse response = routedIpv4Manager.createBgpPeerResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update Bgp Peer:" + getId()); + } + } catch (InvalidParameterValueException ex) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index 4562aa7da19..8f6d5413d72 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -54,7 +54,11 @@ public class CreateServiceOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.CPU_NUMBER, type = CommandType.INTEGER, required = false, description = "the CPU number of the service offering") private Integer cpuNumber; - @Parameter(name = ApiConstants.CPU_SPEED, type = CommandType.INTEGER, required = false, description = "the CPU speed of the service offering in MHz.") + @Parameter(name = ApiConstants.CPU_SPEED, type = CommandType.INTEGER, required = false, description = "For VMware and Xen based hypervisors this is the CPU 
speed of the service offering in MHz.\n" + + "For the KVM hypervisor," + + " the values of the parameters cpuSpeed and cpuNumber will be used to calculate the `shares` value. This value is used by the KVM hypervisor to calculate how much time" + + " the VM will have access to the host's CPU. The `shares` value does not have a unit, and its purpose is being a weight value for the host to compare between its guest" + + " VMs. For more information, see https://libvirt.org/formatdomain.html#cpu-tuning.") private Integer cpuSpeed; @Parameter(name = ApiConstants.DISPLAY_TEXT, type = CommandType.STRING, description = "The display text of the service offering, defaults to 'name'.") @@ -242,6 +246,12 @@ public class CreateServiceOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.ENCRYPT_ROOT, type = CommandType.BOOLEAN, description = "VMs using this offering require root volume encryption", since="4.18") private Boolean encryptRoot; + @Parameter(name = ApiConstants.PURGE_RESOURCES, type = CommandType.BOOLEAN, + description = "Whether to cleanup instance and its associated resource from database upon expunge of the instance", + since="4.20") + private Boolean purgeResources; + + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -269,7 +279,7 @@ public class CreateServiceOfferingCmd extends BaseCmd { public String getServiceOfferingName() { if (StringUtils.isEmpty(serviceOfferingName)) { - throw new InvalidParameterValueException("Failed to create service offering because offering name has not been spified."); + throw new InvalidParameterValueException("Failed to create service offering because offering name has not been specified."); } return serviceOfferingName; } @@ -477,6 +487,10 @@ public class CreateServiceOfferingCmd extends BaseCmd { return false; } + public boolean isPurgeResources() { + return Boolean.TRUE.equals(purgeResources); + } + ///////////////////////////////////////////////////// 
/////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java index 7d6bae86083..e580f0d9f41 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java @@ -89,6 +89,11 @@ public class UpdateServiceOfferingCmd extends BaseCmd { description = "state of the service offering") private String serviceOfferingState; + @Parameter(name = ApiConstants.PURGE_RESOURCES, type = CommandType.BOOLEAN, + description = "Whether to cleanup VM and its associated resource upon expunge", + since="4.20") + private Boolean purgeResources; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -185,6 +190,10 @@ public class UpdateServiceOfferingCmd extends BaseCmd { return state; } + public boolean isPurgeResources() { + return Boolean.TRUE.equals(purgeResources); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmd.java new file mode 100644 index 00000000000..b6833f09733 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmd.java @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.resource; + + +import java.util.Date; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.PurgeExpungedResourcesResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.resource.ResourceCleanupService; + +import com.cloud.event.EventTypes; + +@APICommand(name = "purgeExpungedResources", + description = "Purge expunged resources", + responseObject = SuccessResponse.class, + responseView = ResponseObject.ResponseView.Full, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}, + since = "4.20") +public class PurgeExpungedResourcesCmd extends BaseAsyncCmd { + + @Inject + ResourceCleanupService resourceCleanupService; + + 
///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.RESOURCE_TYPE, type = BaseCmd.CommandType.STRING, + description = "The type of the resource which need to be purged. Supported types: " + + "VirtualMachine") + private String resourceType; + + @Parameter(name = ApiConstants.BATCH_SIZE, type = CommandType.LONG, + description = "The size of batch used during purging") + private Long batchSize; + + @Parameter(name = ApiConstants.START_DATE, + type = CommandType.DATE, + description = "The start date range of the expunged resources used for purging " + + "(use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\")") + private Date startDate; + + @Parameter(name = ApiConstants.END_DATE, + type = CommandType.DATE, + description = "The end date range of the expunged resources used for purging " + + "(use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\")") + private Date endDate; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public String getResourceType() { + return resourceType; + } + + public Long getBatchSize() { + return batchSize; + } + + public Date getStartDate() { + return startDate; + } + + public Date getEndDate() { + return endDate; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_PURGE_EXPUNGED_RESOURCES; + } + + @Override + public String getEventDescription() { + return "Purging expunged resources"; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() { + try { + long result = 
resourceCleanupService.purgeExpungedResources(this); + PurgeExpungedResourcesResponse response = new PurgeExpungedResourcesResponse(); + response.setResourceCount(result); + response.setObjectName(getCommandName().toLowerCase()); + setResponseObject(response); + } catch (Exception e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getLocalizedMessage()); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ChangeStoragePoolScopeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ChangeStoragePoolScopeCmd.java new file mode 100644 index 00000000000..d3b6a074610 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ChangeStoragePoolScopeCmd.java @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.storage; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.event.EventTypes; +import com.cloud.storage.StoragePool; + +@APICommand(name = "changeStoragePoolScope", description = "Changes the scope of a storage pool when the pool is in Disabled state." + + "This feature is officially tested and supported for Hypervisors: KVM and VMware, Protocols: NFS and Ceph, and Storage Provider: DefaultPrimary. " + + "There might be extra steps involved to make this work for other hypervisors and storage options.", + responseObject = SuccessResponse.class, since= "4.19.1", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class ChangeStoragePoolScopeCmd extends BaseAsyncCmd { + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, required = true, description = "the Id of the storage pool") + private Long id; + + @Parameter(name = ApiConstants.SCOPE, type = CommandType.STRING, required = true, description = "the scope of the storage: cluster or zone") + private String scope; + + @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.UUID, entityType = ClusterResponse.class, description = "the Id of the cluster to use if scope is being set to Cluster") + private Long clusterId; + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.StoragePool; + } + + @Override + public Long getApiResourceId() { + return getId(); + } + + public String getEventType() { + return 
EventTypes.EVENT_CHANGE_STORAGE_POOL_SCOPE; + } + + @Override + public String getEventDescription() { + String description = "Change storage pool scope. Storage pool Id: "; + StoragePool pool = _entityMgr.findById(StoragePool.class, getId()); + if (pool != null) { + description += pool.getUuid(); + } else { + description += getId(); + } + description += " to " + getScope(); + return description; + } + + @Override + public void execute() { + _storageService.changeStoragePoolScope(this); + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + public Long getId() { + return id; + } + + public String getScope() { + return scope; + } + + public Long getClusterId() { + return clusterId; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java index 293ed3103cb..57a87939b6b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java @@ -72,7 +72,8 @@ public class ListStoragePoolsCmd extends BaseListCmd { @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "host ID of the storage pools") private Long hostId; - + @Parameter(name = ApiConstants.STORAGE_CUSTOM_STATS, type = CommandType.BOOLEAN, description = "If true, lists the custom stats of the storage pool", since = "4.18.1") + private Boolean customStats; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -129,6 +130,10 @@ public class ListStoragePoolsCmd extends BaseListCmd { this.scope = scope; } + public 
Boolean getCustomStats() { + return customStats != null && customStats; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java index bcc438b957b..0e1631a46ba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java @@ -39,10 +39,17 @@ public class UpdateImageStoreCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID") private Long id; - @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store to read-only, " + - "hence not considering them during storage migration") + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = false, description = "The new name for the Image Store.") + private String name; + + @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = false, + description = "If set to true, it designates the corresponding image store to read-only, hence not considering them during storage migration") private Boolean readonly; + @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, required = false, + description = "The number of bytes CloudStack can use on this image storage.\n\tNOTE: this will be overwritten by the StatsCollector as soon as there is a SSVM to query the storage.") + private Long capacityBytes; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// 
///////////////////////////////////////////////////// @@ -51,17 +58,25 @@ public class UpdateImageStoreCmd extends BaseCmd { return id; } + public String getName() { + return name; + } + public Boolean getReadonly() { return readonly; } + public Long getCapacityBytes() { + return capacityBytes; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @Override public void execute() { - ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly()); + ImageStore result = _storageService.updateImageStore(this); ImageStoreResponse storeResponse = null; if (result != null) { storeResponse = _responseGenerator.createImageStoreResponse(result); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java index 13f02ef83c2..f2d7bbeb189 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java @@ -31,6 +31,8 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import com.cloud.storage.StoragePool; import com.cloud.user.Account; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.ObjectUtils; @SuppressWarnings("rawtypes") @APICommand(name = "updateStoragePool", description = "Updates a storage pool.", responseObject = StoragePoolResponse.class, since = "3.0.0", @@ -147,7 +149,17 @@ public class UpdateStoragePoolCmd extends BaseCmd { @Override public void execute() { - StoragePool result = _storageService.updateStoragePool(this); + StoragePool result = null; + if (ObjectUtils.anyNotNull(name, capacityIops, capacityBytes, url, isTagARule, tags) || + MapUtils.isNotEmpty(details)) { + result = 
_storageService.updateStoragePool(this); + } + + if (enabled != null) { + result = enabled ? _storageService.enablePrimaryStoragePool(id) + : _storageService.disablePrimaryStoragePool(id); + } + if (result != null) { StoragePoolResponse response = _responseGenerator.createStoragePoolResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/heuristics/RemoveSecondaryStorageSelectorCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/heuristics/RemoveSecondaryStorageSelectorCmd.java index 79554f44782..468c87d4d99 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/heuristics/RemoveSecondaryStorageSelectorCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/heuristics/RemoveSecondaryStorageSelectorCmd.java @@ -27,7 +27,7 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.secstorage.heuristics.Heuristic; @APICommand(name = "removeSecondaryStorageSelector", description = "Removes an existing secondary storage selector.", since = "4.19.0", responseObject = - SecondaryStorageHeuristicsResponse.class, requestHasSensitiveInfo = false, entityType = {Heuristic.class}, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) + SuccessResponse.class, requestHasSensitiveInfo = false, entityType = {Heuristic.class}, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class RemoveSecondaryStorageSelectorCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID, entityType = SecondaryStorageHeuristicsResponse.class, required = true, description = "The unique identifier of the secondary storage selector to be removed.") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/GenerateUsageRecordsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/GenerateUsageRecordsCmd.java index 
491b0fe85ba..a0314586d92 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/GenerateUsageRecordsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/GenerateUsageRecordsCmd.java @@ -47,13 +47,13 @@ public class GenerateUsageRecordsCmd extends BaseCmd { @Parameter(name = ApiConstants.END_DATE, type = CommandType.DATE, - required = true, + required = false, description = "End date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-03.") private Date endDate; @Parameter(name = ApiConstants.START_DATE, type = CommandType.DATE, - required = true, + required = false, description = "Start date range for usage record query. Use yyyy-MM-dd as the date format, e.g. startDate=2009-06-01.") private Date startDate; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageTypesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageTypesCmd.java index 2772743c75a..b993735dba7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageTypesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageTypesCmd.java @@ -23,6 +23,7 @@ import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UsageTypeResponse; +import org.apache.cloudstack.usage.UsageTypes; import com.cloud.user.Account; @@ -37,8 +38,8 @@ public class ListUsageTypesCmd extends BaseCmd { @Override public void execute() { - List result = _usageService.listUsageTypes(); - ListResponse response = new ListResponse(); + List result = UsageTypes.listUsageTypes(); + ListResponse response = new ListResponse<>(); response.setResponses(result); response.setResponseName(getCommandName()); this.setResponseObject(response); diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java index 3f8d386d266..c9e1e934152 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java @@ -66,7 +66,7 @@ public class UpdateUserCmd extends BaseCmd { @Parameter(name = ApiConstants.CURRENT_PASSWORD, type = CommandType.STRING, description = "Current password that was being used by the user. You must inform the current password when updating the password.", acceptedOnAdminPort = false) private String currentPassword; - @Parameter(name = ApiConstants.SECRET_KEY, type = CommandType.STRING, description = "The secret key for the user. Must be specified with userApiKey") + @Parameter(name = ApiConstants.USER_SECRET_KEY, type = CommandType.STRING, description = "The secret key for the user. Must be specified with userApiKey") private String secretKey; @Parameter(name = ApiConstants.TIMEZONE, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java index dd897218a4d..ae6ceff26c7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java @@ -201,8 +201,8 @@ public class ImportUnmanagedInstanceCmd extends BaseAsyncCmd { for (Map entry : (Collection>)nicNetworkList.values()) { String nic = entry.get(VmDetailConstants.NIC); String networkUuid = entry.get(VmDetailConstants.NETWORK); - if (logger.isTraceEnabled()) { - logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("nic, '%s', goes on net, '%s'", nic, 
networkUuid)); } if (StringUtils.isAnyEmpty(nic, networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); @@ -219,8 +219,8 @@ public class ImportUnmanagedInstanceCmd extends BaseAsyncCmd { for (Map entry : (Collection>)nicIpAddressList.values()) { String nic = entry.get(VmDetailConstants.NIC); String ipAddress = StringUtils.defaultIfEmpty(entry.get(VmDetailConstants.IP4_ADDRESS), null); - if (logger.isTraceEnabled()) { - logger.trace(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress)); } if (StringUtils.isEmpty(nic)) { throw new InvalidParameterValueException(String.format("NIC ID: '%s' is invalid for IP address mapping", nic)); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java index 1a34b7ea6cc..6f148ff0ee4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java @@ -37,6 +37,7 @@ import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VmwareDatacenterResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.vm.VmImportService; +import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; @@ -116,40 +117,44 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { description = "Temp Path on external host for disk image copy" ) private String tmpPath; - // Import from Vmware to KVM migration parameters + // Import from VMware to KVM migration parameters @Parameter(name = ApiConstants.EXISTING_VCENTER_ID, type = 
CommandType.UUID, entityType = VmwareDatacenterResponse.class, - description = "(only for importing migrated VMs from Vmware to KVM) UUID of a linked existing vCenter") + description = "(only for importing VMs from VMware to KVM) UUID of a linked existing vCenter") private Long existingVcenterId; @Parameter(name = ApiConstants.HOST_IP, type = BaseCmd.CommandType.STRING, - description = "(only for importing migrated VMs from Vmware to KVM) VMware ESXi host IP/Name.") + description = "(only for importing VMs from VMware to KVM) VMware ESXi host IP/Name.") private String hostip; @Parameter(name = ApiConstants.VCENTER, type = CommandType.STRING, - description = "(only for importing migrated VMs from Vmware to KVM) The name/ip of vCenter. Make sure it is IP address or full qualified domain name for host running vCenter server.") + description = "(only for importing VMs from VMware to KVM) The name/ip of vCenter. Make sure it is IP address or full qualified domain name for host running vCenter server.") private String vcenter; @Parameter(name = ApiConstants.DATACENTER_NAME, type = CommandType.STRING, - description = "(only for importing migrated VMs from Vmware to KVM) Name of VMware datacenter.") + description = "(only for importing VMs from VMware to KVM) Name of VMware datacenter.") private String datacenterName; @Parameter(name = ApiConstants.CLUSTER_NAME, type = CommandType.STRING, - description = "(only for importing migrated VMs from Vmware to KVM) Name of VMware cluster.") + description = "(only for importing VMs from VMware to KVM) Name of VMware cluster.") private String clusterName; @Parameter(name = ApiConstants.CONVERT_INSTANCE_HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, - description = "(only for importing migrated VMs from Vmware to KVM) optional - the host to perform the virt-v2v migration from VMware to KVM.") + description = "(only for importing VMs from VMware to KVM) optional - the host to perform the virt-v2v migration from 
VMware to KVM.") private Long convertInstanceHostId; @Parameter(name = ApiConstants.CONVERT_INSTANCE_STORAGE_POOL_ID, type = CommandType.UUID, entityType = StoragePoolResponse.class, - description = "(only for importing migrated VMs from Vmware to KVM) optional - the temporary storage pool to perform the virt-v2v migration from VMware to KVM.") + description = "(only for importing VMs from VMware to KVM) optional - the temporary storage pool to perform the virt-v2v migration from VMware to KVM.") private Long convertStoragePoolId; + @Parameter(name = ApiConstants.FORCE_MS_TO_IMPORT_VM_FILES, type = CommandType.BOOLEAN, + description = "(only for importing VMs from VMware to KVM) optional - if true, forces MS to import VM file(s) to temporary storage, else uses KVM Host if ovftool is available, falls back to MS if not.") + private Boolean forceMsToImportVmFiles; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -198,6 +203,10 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd { return convertStoragePoolId; } + public Boolean getForceMsToImportVmFiles() { + return BooleanUtils.toBooleanDefaultIfNull(forceMsToImportVmFiles, false); + } + public String getHypervisor() { return hypervisor; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListAffectedVmsForStorageScopeChangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListAffectedVmsForStorageScopeChangeCmd.java new file mode 100644 index 00000000000..d586a81b685 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListAffectedVmsForStorageScopeChangeCmd.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.api.command.admin.vm; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; +import org.apache.cloudstack.api.response.VirtualMachineResponse; + +import com.cloud.vm.VirtualMachine; + +@APICommand(name = "listAffectedVmsForStorageScopeChange", + description = "List user and system VMs that need to be stopped and destroyed respectively for changing the scope of the storage pool from Zone to Cluster.", + responseObject = VirtualMachineResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.1", + authorized = {RoleType.Admin}) +public class ListAffectedVmsForStorageScopeChangeCmd extends BaseListCmd { + + @Parameter(name = ApiConstants.CLUSTER_ID, + type = CommandType.UUID, + entityType = ClusterResponse.class, + required = true, + description = "the Id of the cluster the scope of the storage pool is being changed to") + private Long clusterIdForScopeChange; + + @Parameter(name = ApiConstants.STORAGE_ID, + type = CommandType.UUID, + entityType = 
StoragePoolResponse.class, + required = true, + description = "the Id of the storage pool on which change scope operation is being done") + private Long storageId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getClusterIdForScopeChange() { + return clusterIdForScopeChange; + } + + public Long getStorageId() { + return storageId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() { + ListResponse response = _queryService.listAffectedVmsForStorageScopeChange(this); + response.setResponseName(getCommandName()); + response.setObjectName(VirtualMachine.class.getSimpleName().toLowerCase()); + setResponseObject(response); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVnfAppliancesCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVnfAppliancesCmdByAdmin.java new file mode 100644 index 00000000000..1a820ab9a48 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVnfAppliancesCmdByAdmin.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.vm; + +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ResponseObject.ResponseView; +import org.apache.cloudstack.api.command.admin.AdminCmd; + +import org.apache.cloudstack.api.command.user.vm.ListVnfAppliancesCmd; +import org.apache.cloudstack.api.response.UserVmResponse; + +@APICommand(name = "listVnfAppliances", description = "List VNF appliance owned by the account.", + responseObject = UserVmResponse.class, + responseView = ResponseView.Full, + entityType = {VirtualMachine.class}, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + authorized = {RoleType.Admin}, + since = "4.19.1") +public class ListVnfAppliancesCmdByAdmin extends ListVnfAppliancesCmd implements AdminCmd { +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCCmdByAdmin.java index bd00876ed36..9dc31f8cefe 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCCmdByAdmin.java @@ -17,13 +17,31 @@ package org.apache.cloudstack.api.command.admin.vpc; import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject.ResponseView; import 
org.apache.cloudstack.api.command.admin.AdminCmd; import org.apache.cloudstack.api.command.user.vpc.CreateVPCCmd; +import org.apache.cloudstack.api.response.BgpPeerResponse; import org.apache.cloudstack.api.response.VpcResponse; import com.cloud.network.vpc.Vpc; +import java.util.List; + @APICommand(name = "createVPC", description = "Creates a VPC", responseObject = VpcResponse.class, responseView = ResponseView.Full, entityType = {Vpc.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class CreateVPCCmdByAdmin extends CreateVPCCmd implements AdminCmd {} +public class CreateVPCCmdByAdmin extends CreateVPCCmd implements AdminCmd { + @Parameter(name = ApiConstants.BGP_PEER_IDS, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType = BgpPeerResponse.class, + description = "Ids of the Bgp Peer for the VPC", + since = "4.20.0") + private List bgpPeerIds; + + + public List getBgpPeerIds() { + return bgpPeerIds; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java index dd5c815238e..73b4f5df196 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java @@ -118,12 +118,6 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd { since = "4.20.0") private Boolean forNsx; - @Parameter(name = ApiConstants.NSX_MODE, - type = CommandType.STRING, - description = "Indicates the mode with which the network will operate. 
Valid option: NATTED or ROUTED", - since = "4.20.0") - private String nsxMode; - @Parameter(name = ApiConstants.NSX_SUPPORT_LB, type = CommandType.BOOLEAN, description = "true if network offering for NSX VPC offering supports Load balancer service.", @@ -136,6 +130,22 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd { since = "4.16") private Boolean enable; + @Parameter(name = ApiConstants.NETWORK_MODE, + type = CommandType.STRING, + description = "Indicates the mode with which the network will operate. Valid option: NATTED or ROUTED", + since = "4.20.0") + private String networkMode; + + @Parameter(name = ApiConstants.SPECIFY_AS_NUMBER, type = CommandType.BOOLEAN, since = "4.20.0", + description = "true if the VPC offering supports choosing AS number") + private Boolean specifyAsNumber; + + @Parameter(name = ApiConstants.ROUTING_MODE, + type = CommandType.STRING, + since = "4.20.0", + description = "the routing mode for the VPC offering. Supported types are: Static or Dynamic.") + private String routingMode; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -173,8 +183,8 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd { return BooleanUtils.isTrue(forNsx); } - public String getNsxMode() { - return nsxMode; + public String getNetworkMode() { + return networkMode; } public boolean getNsxSupportsLbService() { @@ -265,6 +275,14 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd { return false; } + public Boolean getSpecifyAsNumber() { + return BooleanUtils.toBoolean(specifyAsNumber); + } + + public String getRoutingMode() { + return routingMode; + } + @Override public void create() throws ResourceAllocationException { VpcOffering vpcOff = _vpcProvSvc.createVpcOffering(this); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/AddUserToProjectCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/account/AddUserToProjectCmd.java index d38ae057f05..9cd845c774c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/AddUserToProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/AddUserToProjectCmd.java @@ -103,7 +103,7 @@ public class AddUserToProjectCmd extends BaseAsyncCmd { @Override public String getEventDescription() { - return "Adding user "+getUsername()+" to Project "+getProjectId(); + return "Adding user " + getUsername() + " to project: " + getProjectId(); } ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteUserFromProjectCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteUserFromProjectCmd.java index 8319911c5c8..0731d837804 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteUserFromProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteUserFromProjectCmd.java @@ -81,7 +81,6 @@ public class DeleteUserFromProjectCmd extends BaseAsyncCmd { return "Removing user " + userId + " from project: " + projectId; } - @Override public long getEntityOwnerId() { Project project = _projectService.getProject(projectId); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java index f5b8c3da855..2d84b20c582 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java @@ -100,7 +100,7 @@ public class CreateAutoScaleVmProfileCmd extends BaseAsyncCreateCmd { description = "an optional binary data that can be sent to the virtual machine upon a successful 
deployment. " + "This binary data must be base64 encoded before adding it to the request. " + "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + - "Using HTTP POST(via POST body), you can send up to 1MB of data after base64 encoding." + + "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. " + "You also need to change vm.userdata.max.length value", length = 1048576, since = "4.18.0") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmProfileCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmProfileCmd.java index e8ca502b5cd..0b73fd91b52 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmProfileCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmProfileCmd.java @@ -95,7 +95,7 @@ public class UpdateAutoScaleVmProfileCmd extends BaseAsyncCustomIdCmd { description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. " + "This binary data must be base64 encoded before adding it to the request. " + "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + - "Using HTTP POST(via POST body), you can send up to 1MB of data after base64 encoding." + + "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. 
" + "You also need to change vm.userdata.max.length value", length = 1048576, since = "4.18.0") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupScheduleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupScheduleCmd.java index 6cc765328f6..a7610717435 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupScheduleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupScheduleCmd.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.user.backup; import javax.inject.Inject; +import com.amazonaws.util.CollectionUtils; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -27,6 +28,7 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.BackupScheduleResponse; +import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupSchedule; @@ -39,6 +41,9 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.utils.exception.CloudRuntimeException; +import java.util.ArrayList; +import java.util.List; + @APICommand(name = "listBackupSchedule", description = "List backup schedule of a VM", responseObject = BackupScheduleResponse.class, since = "4.14.0", @@ -74,9 +79,14 @@ public class ListBackupScheduleCmd extends BaseCmd { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try{ - BackupSchedule schedule = backupManager.listBackupSchedule(getVmId()); 
- if (schedule != null) { - BackupScheduleResponse response = _responseGenerator.createBackupScheduleResponse(schedule); + List schedules = backupManager.listBackupSchedule(getVmId()); + ListResponse response = new ListResponse<>(); + List scheduleResponses = new ArrayList<>(); + if (!CollectionUtils.isNullOrEmpty(schedules)) { + for (BackupSchedule schedule : schedules) { + scheduleResponses.add(_responseGenerator.createBackupScheduleResponse(schedule)); + } + response.setResponses(scheduleResponses, schedules.size()); response.setResponseName(getCommandName()); setResponseObject(response); } else { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java index 8597d97278c..7d87cc37e6c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java @@ -116,6 +116,7 @@ public class ListBackupsCmd extends BaseListProjectAndAccountResourcesCmd { Pair, Integer> result = backupManager.listBackups(this); setupResponseBackupList(result.first(), result.second()); } catch (Exception e) { + logger.debug("Exception while listing backups", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/AddBackupRepositoryCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/AddBackupRepositoryCmd.java new file mode 100644 index 00000000000..5d0c838bc37 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/AddBackupRepositoryCmd.java @@ -0,0 +1,137 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.user.backup.repository; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BackupRepositoryResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.backup.BackupRepository; +import org.apache.cloudstack.backup.BackupRepositoryService; +import org.apache.cloudstack.context.CallContext; + +import javax.inject.Inject; + +@APICommand(name = "addBackupRepository", + description = "Adds a backup repository to store NAS backups", + responseObject = BackupRepositoryResponse.class, since = "4.20.0", + authorized = {RoleType.Admin}) +public class AddBackupRepositoryCmd extends BaseCmd { + + @Inject + private BackupRepositoryService backupRepositoryService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.NAME, type = 
CommandType.STRING, required = true, description = "name of the backup repository") + private String name; + + @Parameter(name = ApiConstants.ADDRESS, type = CommandType.STRING, required = true, description = "address of the backup repository") + private String address; + + @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, required = true, description = "type of the backup repository storage. Supported values: nfs, cephfs, cifs") + private String type; + + @Parameter(name = ApiConstants.PROVIDER, type = CommandType.STRING, description = "backup repository provider") + private String provider; + + @Parameter(name = ApiConstants.MOUNT_OPTIONS, type = CommandType.STRING, description = "shared storage mount options") + private String mountOptions; + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + required = true, + description = "ID of the zone where the backup repository is to be added") + private Long zoneId; + + @Parameter(name = ApiConstants.CAPACITY_BYTES, type = CommandType.LONG, description = "capacity of this backup repository") + private Long capacityBytes; + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public BackupRepositoryService getBackupRepositoryService() { + return backupRepositoryService; + } + + public String getName() { + return name; + } + + public String getType() { + if ("cephfs".equalsIgnoreCase(type)) { + return "ceph"; + } + return type.toLowerCase(); + } + + public String getAddress() { + return address; + } + + public String getProvider() { + return provider; + } + + public String getMountOptions() { + return mountOptions == null ? 
"" : mountOptions; + } + + public Long getZoneId() { + return zoneId; + } + + public Long getCapacityBytes() { + return capacityBytes; + } + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() { + try { + BackupRepository result = backupRepositoryService.addBackupRepository(this); + if (result != null) { + BackupRepositoryResponse response = _responseGenerator.createBackupRepositoryResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add backup repository"); + } + } catch (Exception ex4) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex4.getMessage()); + } + + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/DeleteBackupRepositoryCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/DeleteBackupRepositoryCmd.java new file mode 100644 index 00000000000..912170eb4ca --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/DeleteBackupRepositoryCmd.java @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.user.backup.repository; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BackupRepositoryResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.backup.BackupRepositoryService; + +import javax.inject.Inject; + +@APICommand(name = "deleteBackupRepository", + description = "delete a backup repository", + responseObject = SuccessResponse.class, since = "4.20.0", + authorized = {RoleType.Admin}) +public class DeleteBackupRepositoryCmd extends BaseCmd { + + @Inject + BackupRepositoryService backupRepositoryService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = BackupRepositoryResponse.class, + required = true, + description = "ID of the backup repository to be deleted") + private Long id; + + + ///////////////////////////////////////////////////// + //////////////// Accessors ////////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + @Override + public void execute() { + boolean 
result = backupRepositoryService.deleteBackupRepository(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete backup repository"); + } + } + + @Override + public long getEntityOwnerId() { + return 0; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/ListBackupRepositoriesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/ListBackupRepositoriesCmd.java new file mode 100644 index 00000000000..8293afb657d --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/repository/ListBackupRepositoriesCmd.java @@ -0,0 +1,110 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.user.backup.repository; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.utils.Pair; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BackupRepositoryResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.backup.BackupRepository; +import org.apache.cloudstack.backup.BackupRepositoryService; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.List; + +@APICommand(name = "listBackupRepositories", + description = "Lists all backup repositories", + responseObject = BackupRepositoryResponse.class, since = "4.20.0", + authorized = {RoleType.Admin}) +public class ListBackupRepositoriesCmd extends BaseListCmd { + + @Inject + BackupRepositoryService backupRepositoryService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "name of the backup repository") + private String name; + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + entityType = ZoneResponse.class, + description = "ID of the zone where the backup repository is to be added") + private Long zoneId; + + @Parameter(name = ApiConstants.PROVIDER, type = 
CommandType.STRING, description = "the backup repository provider") + private String provider; + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = BackupRepositoryResponse.class, description = "ID of the backup repository") + private Long id; + + ///////////////////////////////////////////////////// + //////////////// Accessors ////////////////////////// + ///////////////////////////////////////////////////// + + + public String getName() { + return name; + } + + public Long getZoneId() { + return zoneId; + } + + public String getProvider() { + return provider; + } + + public Long getId() { + return id; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + try { + Pair, Integer> repositoriesPair = backupRepositoryService.listBackupRepositories(this); + List backupRepositories = repositoriesPair.first(); + ListResponse response = new ListResponse<>(); + List responses = new ArrayList<>(); + for (BackupRepository repository : backupRepositories) { + responses.add(_responseGenerator.createBackupRepositoryResponse(repository)); + } + response.setResponses(responses, repositoriesPair.second()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (Exception e) { + String msg = String.format("Error listing backup repositories, due to: %s", e.getMessage()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, msg); + } + + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/bgp/ListASNumbersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/bgp/ListASNumbersCmd.java new file mode 100644 index 00000000000..b835f7225df --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/bgp/ListASNumbersCmd.java @@ -0,0 +1,134 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// 
or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.bgp; + +import com.cloud.bgp.ASNumber; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.utils.Pair; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ASNRangeResponse; +import org.apache.cloudstack.api.response.ASNumberResponse; +import org.apache.cloudstack.api.response.AccountResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.VpcResponse; +import org.apache.cloudstack.api.response.ZoneResponse; + 
+import java.util.ArrayList; +import java.util.List; + +@APICommand(name = "listASNumbers", + description = "List Autonomous Systems Numbers", + responseObject = ASNumberResponse.class, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + since = "4.20.0") +public class ListASNumbersCmd extends BaseListCmd { + + @Parameter(name = ApiConstants.ZONE_ID, type = BaseCmd.CommandType.UUID, entityType = ZoneResponse.class, + description = "the zone ID") + private Long zoneId; + + @Parameter(name = ApiConstants.ASN_RANGE_ID, type = BaseCmd.CommandType.UUID, entityType = ASNRangeResponse.class, + description = "the AS Number range ID") + private Long asNumberRangeId; + + @Parameter(name = ApiConstants.AS_NUMBER, type = CommandType.INTEGER, entityType = ASNumberResponse.class, + description = "AS number") + private Integer asNumber; + + @Parameter(name = ApiConstants.IS_ALLOCATED, type = CommandType.BOOLEAN, + description = "to indicate if the AS number is allocated to any network") + private Boolean allocated; + + @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class, + description = "the network id") + private Long networkId; + + @Parameter(name = ApiConstants.VPC_ID, type = CommandType.UUID, entityType = VpcResponse.class, + description = "the vpc id") + private Long vpcId; + + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, entityType = AccountResponse.class, + description = "account name") + private String account; + + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, + description = "domain id") + private Long domainId; + + public Long getZoneId() { + return zoneId; + } + + public Long getAsNumberRangeId() { + return asNumberRangeId; + } + + public Boolean getAllocated() { + return allocated; + } + + public Integer getAsNumber() { return asNumber; } + + public Long getNetworkId() { + return networkId; + } + + 
public String getAccount() { + return account; + } + + public Long getDomainId() { + return domainId; + } + + public Long getVpcId() { + return vpcId; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + try { + Pair, Integer> pair = bgpService.listASNumbers(this); + List asNumbers = pair.first(); + ListResponse response = new ListResponse<>(); + List responses = new ArrayList<>(); + for (ASNumber asn : asNumbers) { + responses.add(_responseGenerator.createASNumberResponse(asn)); + } + response.setResponses(responses, pair.second()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (Exception e) { + String msg = String.format("Error listing AS Numbers, due to: %s", e.getMessage()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, msg); + } + + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java index cf25dfaf5b5..0cecbb37020 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java @@ -55,6 +55,7 @@ public class ListCapabilitiesCmd extends BaseCmd { response.setAllowUserExpungeRecoverVM((Boolean)capabilities.get("allowUserExpungeRecoverVM")); response.setAllowUserExpungeRecoverVolume((Boolean)capabilities.get("allowUserExpungeRecoverVolume")); response.setAllowUserViewAllDomainAccounts((Boolean)capabilities.get("allowUserViewAllDomainAccounts")); + response.setAllowUserForceStopVM((Boolean)capabilities.get(ApiConstants.ALLOW_USER_FORCE_STOP_VM)); response.setKubernetesServiceEnabled((Boolean)capabilities.get("kubernetesServiceEnabled")); 
response.setKubernetesClusterExperimentalFeaturesEnabled((Boolean)capabilities.get("kubernetesClusterExperimentalFeaturesEnabled")); response.setCustomHypervisorDisplayName((String) capabilities.get("customHypervisorDisplayName")); @@ -69,6 +70,8 @@ public class ListCapabilitiesCmd extends BaseCmd { response.setInstancesStatsUserOnly((Boolean) capabilities.get(ApiConstants.INSTANCES_STATS_USER_ONLY)); response.setInstancesDisksStatsRetentionEnabled((Boolean) capabilities.get(ApiConstants.INSTANCES_DISKS_STATS_RETENTION_ENABLED)); response.setInstancesDisksStatsRetentionTime((Integer) capabilities.get(ApiConstants.INSTANCES_DISKS_STATS_RETENTION_TIME)); + response.setSharedFsVmMinCpuCount((Integer)capabilities.get(ApiConstants.SHAREDFSVM_MIN_CPU_COUNT)); + response.setSharedFsVmMinRamSize((Integer)capabilities.get(ApiConstants.SHAREDFSVM_MIN_RAM_SIZE)); response.setObjectName("capability"); response.setResponseName(getCommandName()); this.setResponseObject(response); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java index 24b5a78c085..56c818f832b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java @@ -94,11 +94,35 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements Firewal return ipAddressId; } + public void setIpAddressId(Long ipAddressId) { + this.ipAddressId = ipAddressId; + } + @Override public String getProtocol() { return protocol.trim(); } + public void setProtocol(String protocol) { + this.protocol = protocol; + } + + public Integer getPublicStartPort() { + return publicStartPort; + } + + public void setPublicStartPort(Integer publicStartPort) { + this.publicStartPort = publicStartPort; + } + + public Integer getPublicEndPort() { + 
return publicEndPort; + } + + public void setPublicEndPort(Integer publicEndPort) { + this.publicEndPort = publicEndPort; + } + @Override public List getSourceCidrList() { if (cidrlist != null) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/CreateIpv6FirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/CreateIpv6FirewallRuleCmd.java index 4e3cf4621ef..18af5b2973e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/CreateIpv6FirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/CreateIpv6FirewallRuleCmd.java @@ -43,7 +43,7 @@ import com.cloud.user.Account; import com.cloud.utils.net.NetUtils; @APICommand(name = "createIpv6FirewallRule", - description = "Creates an Ipv6 firewall rule in the given network (the network has to belong to VPC)", + description = "Creates an Ipv6 firewall rule in the given network (the network must not belong to VPC)", responseObject = FirewallRuleResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ExtractIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ExtractIsoCmd.java index 5db680066a6..7861c1e5d41 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ExtractIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ExtractIsoCmd.java @@ -120,7 +120,7 @@ public class ExtractIsoCmd extends BaseAsyncCmd { CallContext.current().setEventDetails(getEventDescription()); String uploadUrl = _templateService.extract(this); if (uploadUrl != null) { - ExtractResponse response = _responseGenerator.createExtractResponse(id, zoneId, getEntityOwnerId(), mode, uploadUrl); + ExtractResponse response = _responseGenerator.createImageExtractResponse(id, zoneId, getEntityOwnerId(), mode, uploadUrl); response.setResponseName(getCommandName()); 
response.setObjectName("iso"); this.setResponseObject(response); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java index 04dcbf8ca96..5c4d606a93c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java @@ -16,6 +16,7 @@ // under the License. package org.apache.cloudstack.api.command.user.iso; +import com.cloud.cpu.CPU; import com.cloud.server.ResourceIcon; import com.cloud.server.ResourceTag; import org.apache.cloudstack.api.response.ResourceIconResponse; @@ -34,6 +35,7 @@ import org.apache.cloudstack.context.CallContext; import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; +import org.apache.commons.lang3.StringUtils; import java.util.List; @@ -88,6 +90,11 @@ public class ListIsosCmd extends BaseListTaggedResourcesCmd implements UserCmd { @Parameter(name = ApiConstants.SHOW_RESOURCE_ICON, type = CommandType.BOOLEAN, description = "flag to display the resource image for the isos") private Boolean showIcon; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, + description = "the CPU arch of the ISO. 
Valid options are: x86_64, aarch64", + since = "4.20") + private String arch; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -159,6 +166,13 @@ public class ListIsosCmd extends BaseListTaggedResourcesCmd implements UserCmd { return onlyReady; } + public CPU.CPUArch getArch() { + if (StringUtils.isBlank(arch)) { + return null; + } + return CPU.CPUArch.fromType(arch); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java index becfdcd653d..81f52552289 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.user.iso; import java.util.List; +import com.cloud.cpu.CPU; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; @@ -118,6 +119,11 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd { description = "true if password reset feature is supported; default is false") private Boolean passwordEnabled; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, + description = "the CPU arch of the ISO. Valid options are: x86_64, aarch64", + since = "4.20") + private String arch; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -229,6 +235,14 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd { return passwordEnabled == null ? 
false : passwordEnabled; } + public void setArch(String arch) { + this.arch = arch; + } + + public CPU.CPUArch getArch() { + return CPU.CPUArch.fromType(arch); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java index 81a52ce2dfe..c4424b1d937 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java @@ -72,7 +72,7 @@ public class AssignToLoadBalancerRuleCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID_IP, type = CommandType.MAP, - description = "VM ID and IP map, vmidipmap[0].vmid=1 vmidipmap[0].ip=10.1.1.75", + description = "VM ID and IP map, vmidipmap[0].vmid=1 vmidipmap[0].vmip=10.1.1.75", since = "4.4") private Map vmIdIpMap; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java index 127661b1820..8d8e598bcab 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java @@ -113,6 +113,10 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd { return p; } + public void setProtocol(String protocol) { + this.protocol = protocol; + } + public List getSourceCidrList() { if (cidrlist != null) { return cidrlist; @@ -136,6 +140,9 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd { throw new InvalidParameterValueException("Invalid traffic type " + 
trafficType); } + public void setTrafficType(String trafficType) { + this.trafficType = trafficType; + } // /////////////////////////////////////////////////// // ///////////// API Implementation/////////////////// // /////////////////////////////////////////////////// @@ -144,15 +151,23 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd { return action; } + public void setAction(String action) { + this.action = action; + } + public Integer getNumber() { return number; } - public Integer getSourcePortStart() { + public Integer getPublicStartPort() { return publicStartPort; } - public Integer getSourcePortEnd() { + public void setPublicStartPort(Integer publicStartPort) { + this.publicStartPort = publicStartPort; + } + + public Integer getPublicEndPort() { if (publicEndPort == null) { if (publicStartPort != null) { return publicStartPort; @@ -164,10 +179,18 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd { return null; } + public void setPublicEndPort(Integer publicEndPort) { + this.publicEndPort = publicEndPort; + } + public Long getNetworkId() { return networkId; } + public void setNetworkId(Long networkId) { + this.networkId = networkId; + } + @Override public long getEntityOwnerId() { Account caller = CallContext.current().getCallingAccount(); @@ -207,6 +230,10 @@ public class CreateNetworkACLCmd extends BaseAsyncCreateCmd { return aclId; } + public void setAclId(Long aclId) { + this.aclId = aclId; + } + public String getReason() { return reason; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java index 2395339a477..aca3d3ca1b4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java @@ -191,6 +191,14 @@ public class CreateNetworkCmd extends BaseCmd implements 
UserCmd { since = "4.19") private String sourceNatIP; + @Parameter(name = ApiConstants.CIDR_SIZE, type = CommandType.INTEGER, + description = "the CIDR size of IPv4 network. For regular users, this is required for isolated networks with ROUTED mode.", + since = "4.20.0") + private Integer cidrSize; + + @Parameter(name=ApiConstants.AS_NUMBER, type=CommandType.LONG, since = "4.20.0", description="the AS Number of the network") + private Long asNumber; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -364,6 +372,10 @@ public class CreateNetworkCmd extends BaseCmd implements UserCmd { return NetUtils.standardizeIp6Cidr(ip6Cidr); } + public Integer getCidrSize() { + return cidrSize; + } + public Long getAclId() { return aclId; } @@ -391,6 +403,10 @@ public class CreateNetworkCmd extends BaseCmd implements UserCmd { return ip6Dns2; } + public Long getAsNumber() { + return asNumber; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkOfferingsCmd.java index 33f452008d9..bdc89d804cd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkOfferingsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkOfferingsCmd.java @@ -110,6 +110,12 @@ public class ListNetworkOfferingsCmd extends BaseListCmd { @Parameter(name = ApiConstants.FOR_VPC, type = CommandType.BOOLEAN, description = "the network offering can be used" + " only for network creation inside the VPC") private Boolean forVpc; + @Parameter(name = ApiConstants.ROUTING_MODE, + type = CommandType.STRING, + description = "the routing mode for 
the network offering. Supported types are: Static or Dynamic.", + since = "4.20.0") + private String routingMode; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -186,6 +192,8 @@ public class ListNetworkOfferingsCmd extends BaseListCmd { return forVpc; } + public String getRoutingMode() { return routingMode; } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmd.java new file mode 100644 index 00000000000..7146d1ae1d1 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmd.java @@ -0,0 +1,271 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.network.routing; + +import java.util.ArrayList; +import java.util.List; + +import com.cloud.exception.NetworkRuleConflictException; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.FirewallResponse; +import org.apache.cloudstack.api.response.FirewallRuleResponse; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.commons.lang3.StringUtils; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.network.rules.FirewallRule; +import com.cloud.user.Account; +import com.cloud.utils.net.NetUtils; + +@APICommand(name = "createRoutingFirewallRule", + description = "Creates a routing firewall rule in the given network in ROUTED mode", + since = "4.20.0", + responseObject = FirewallRuleResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class CreateRoutingFirewallRuleCmd extends BaseAsyncCreateCmd { + + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.PROTOCOL, type = CommandType.STRING, required = true, description = "the protocol for the firewall rule. 
Valid values are TCP/UDP/ICMP/ALL or valid protocol number") + private String protocol; + + @Parameter(name = ApiConstants.START_PORT, type = CommandType.INTEGER, description = "the starting port of firewall rule") + private Integer publicStartPort; + + @Parameter(name = ApiConstants.END_PORT, type = CommandType.INTEGER, description = "the ending port of firewall rule") + private Integer publicEndPort; + + @Parameter(name = ApiConstants.CIDR_LIST, type = CommandType.LIST, collectionType = CommandType.STRING, + description = "the source CIDR list to allow traffic from. Multiple entries must be separated by a single comma character (,).") + protected List sourceCidrList; + + @Parameter(name = ApiConstants.DEST_CIDR_LIST, type = CommandType.LIST, collectionType = CommandType.STRING, + description = "the destination CIDR list to allow traffic to. Multiple entries must be separated by a single comma character (,).") + protected List destinationCidrlist; + + @Parameter(name = ApiConstants.ICMP_TYPE, type = CommandType.INTEGER, description = "type of the ICMP message being sent") + private Integer icmpType; + + @Parameter(name = ApiConstants.ICMP_CODE, type = CommandType.INTEGER, description = "error code for this ICMP message") + private Integer icmpCode; + + @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class, + description = "The network of the VM the firewall rule will be created for", required = true) + private Long networkId; + + @Parameter(name = ApiConstants.TRAFFIC_TYPE, type = CommandType.STRING, + description = "the traffic type for the Routing firewall rule, can be ingress or egress, defaulted to ingress if not specified") + private String trafficType; + + @Parameter(name = ApiConstants.FOR_DISPLAY, type = CommandType.BOOLEAN, + description = "an optional field, whether to display the rule to the end user or not", authorized = {RoleType.Admin}) + private Boolean display; + + // 
/////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + @Override + public boolean isDisplay() { + if (display != null) { + return display; + } else { + return true; + } + } + + public String getProtocol() { + String p = protocol == null ? "" : protocol.trim(); + if (StringUtils.isNumeric(p)) { + int protoNumber = Integer.parseInt(p); + switch (protoNumber) { + case 1: + p = NetUtils.ICMP_PROTO; + break; + case 6: + p = NetUtils.TCP_PROTO; + break; + case 17: + p = NetUtils.UDP_PROTO; + break; + default: + throw new InvalidParameterValueException(String.format("Protocol %d not supported", protoNumber)); + + } + } + return p; + } + + public List getSourceCidrList() { + if (sourceCidrList != null) { + return sourceCidrList; + } else { + List oneCidrList = new ArrayList(); + oneCidrList.add(NetUtils.ALL_IP4_CIDRS); + return oneCidrList; + } + } + + public List getDestinationCidrList() { + if (destinationCidrlist != null) { + return destinationCidrlist; + } else { + List oneCidrList = new ArrayList(); + oneCidrList.add(NetUtils.ALL_IP4_CIDRS); + return oneCidrList; + } + } + + public FirewallRule.TrafficType getTrafficType() { + if (trafficType == null) { + return FirewallRule.TrafficType.Ingress; + } + for (FirewallRule.TrafficType type : FirewallRule.TrafficType.values()) { + if (type.toString().equalsIgnoreCase(trafficType)) { + return type; + } + } + throw new InvalidParameterValueException("Invalid traffic type " + trafficType); + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + public Integer getSourcePortStart() { + return publicStartPort; + } + + public Integer getSourcePortEnd() { + if (publicEndPort == null) { + if (publicStartPort != null) { + return publicStartPort; + } + } else { + return publicEndPort; + } + + 
return null; + } + + public Long getNetworkId() { + return networkId; + } + + @Override + public long getEntityOwnerId() { + Network network = _networkService.getNetwork(networkId); + if (network != null) { + return network.getAccountId(); + } + Account owner = CallContext.current().getCallingAccount(); + return owner.getAccountId(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_CREATE; + } + + @Override + public String getEventDescription() { + return "Creating ipv4 firewall rule for routed network"; + } + + public Integer getIcmpCode() { + if (icmpCode != null) { + return icmpCode; + } else if (getProtocol().equalsIgnoreCase(NetUtils.ICMP_PROTO)) { + return -1; + } + return null; + } + + public Integer getIcmpType() { + if (icmpType != null) { + return icmpType; + } else if (getProtocol().equalsIgnoreCase(NetUtils.ICMP_PROTO)) { + return -1; + + } + return null; + } + + @Override + public void create() { + try { + FirewallRule result = routedIpv4Manager.createRoutingFirewallRule(this); + setEntityId(result.getId()); + setEntityUuid(result.getUuid()); + } catch (NetworkRuleConflictException e) { + logger.trace("Network Rule Conflict: ", e); + throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage(), e); + } + } + + @Override + public void execute() throws ResourceUnavailableException { + boolean success = false; + FirewallRule rule = _firewallService.getFirewallRule(getEntityId()); + try { + CallContext.current().setEventDetails("Rule ID: " + getEntityId()); + success = routedIpv4Manager.applyRoutingFirewallRule(rule.getId()); + + // State is different after the rule is applied, so get new object here + rule = _firewallService.getFirewallRule(getEntityId()); + FirewallResponse ruleResponse = new FirewallResponse(); + if (rule != null) { + ruleResponse = _responseGenerator.createFirewallResponse(rule); + setResponseObject(ruleResponse); + } + 
ruleResponse.setResponseName(getCommandName()); + } catch (Exception ex) { + logger.error("Got exception when create Routing firewall rules: " + ex); + } finally { + if (!success || rule == null) { + routedIpv4Manager.revokeRoutingFirewallRule(getEntityId()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create Routing firewall rule"); + } + } + } + + @Override + public Long getApiResourceId() { + return getNetworkId(); + } + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.Network; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/DeleteRoutingFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/DeleteRoutingFirewallRuleCmd.java new file mode 100644 index 00000000000..16696f5f71b --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/DeleteRoutingFirewallRuleCmd.java @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.network.routing; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.FirewallRuleResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.event.EventTypes; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.rules.FirewallRule; +import com.cloud.user.Account; + +@APICommand(name = "deleteRoutingFirewallRule", + description = "Deletes a routing firewall rule", + since = "4.20.0", + responseObject = SuccessResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class DeleteRoutingFirewallRuleCmd extends BaseAsyncCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = FirewallRuleResponse.class, required = true, description = "the ID of the Routing firewall rule") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public String getEventType() { + 
return EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_DELETE; + } + + @Override + public String getEventDescription() { + return String.format("Deleting ipv4 routing firewall rule ID=%s", id); + } + + @Override + public long getEntityOwnerId() { + FirewallRule rule = _firewallService.getFirewallRule(id); + if (rule != null) { + return rule.getAccountId(); + } + Account caller = CallContext.current().getCallingAccount(); + return caller.getAccountId(); + } + + @Override + public void execute() throws ResourceUnavailableException { + CallContext.current().setEventDetails("Routing firewall rule ID: " + id); + boolean result = routedIpv4Manager.revokeRoutingFirewallRule(id); + + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete Routing firewall rule"); + } + } + + @Override + public Long getApiResourceId() { + FirewallRule rule = _firewallService.getFirewallRule(id); + if (rule != null) { + return rule.getNetworkId(); + } + return null; + } + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.Network; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/ListRoutingFirewallRulesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/ListRoutingFirewallRulesCmd.java new file mode 100644 index 00000000000..3fdf3b0f5b4 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/ListRoutingFirewallRulesCmd.java @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.network.routing; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListTaggedResourcesCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.user.firewall.IListFirewallRulesCmd; +import org.apache.cloudstack.api.response.FirewallResponse; +import org.apache.cloudstack.api.response.FirewallRuleResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.NetworkResponse; + +import com.cloud.network.rules.FirewallRule; +import com.cloud.utils.Pair; + +@APICommand(name = "listRoutingFirewallRules", + description = "Lists all Routing firewall rules", + since = "4.20.0", + responseObject = FirewallRuleResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class ListRoutingFirewallRulesCmd extends BaseListTaggedResourcesCmd implements IListFirewallRulesCmd { + + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = 
ApiConstants.ID, type = CommandType.UUID, entityType = FirewallRuleResponse.class, + description = "Lists Routing firewall rule with the specified ID") + private Long id; + + @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class, description = "list Routing firewall rules by network ID") + private Long networkId; + + @Parameter(name = ApiConstants.TRAFFIC_TYPE, type = CommandType.STRING, description = "list Routing firewall rules by traffic type - ingress or egress") + private String trafficType; + + @Parameter(name = ApiConstants.FOR_DISPLAY, type = CommandType.BOOLEAN, description = "list resources by display flag; only ROOT admin is eligible to pass this parameter", authorized = {RoleType.Admin}) + private Boolean display; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + @Override + public Long getNetworkId() { + return networkId; + } + + @Override + public Long getId() { + return id; + } + + @Override + public FirewallRule.TrafficType getTrafficType() { + if (trafficType != null) { + return FirewallRule.TrafficType.valueOf(trafficType); + } + return null; + } + + @Override + public Long getIpAddressId() { + return null; + } + + @Override + public Boolean getDisplay() { + if (display != null) { + return display; + } + return super.getDisplay(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() { + Pair, Integer> result = routedIpv4Manager.listRoutingFirewallRules(this); + ListResponse response = new ListResponse<>(); + List ruleResponses = new ArrayList<>(); + + for (FirewallRule rule : result.first()) { + FirewallResponse ruleData = _responseGenerator.createFirewallResponse(rule); + ruleResponses.add(ruleData); + } + 
response.setResponses(ruleResponses, result.second()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/UpdateRoutingFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/UpdateRoutingFirewallRuleCmd.java new file mode 100644 index 00000000000..c6f6034b1ba --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/UpdateRoutingFirewallRuleCmd.java @@ -0,0 +1,125 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.network.routing; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseAsyncCustomIdCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.FirewallResponse; +import org.apache.cloudstack.api.response.FirewallRuleResponse; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.event.EventTypes; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.rules.FirewallRule; +import com.cloud.user.Account; + +@APICommand(name = "updateRoutingFirewallRule", + description = "Updates Routing firewall rule with specified ID", + since = "4.20.0", + responseObject = FirewallRuleResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class UpdateRoutingFirewallRuleCmd extends BaseAsyncCustomIdCmd { + + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = FirewallRuleResponse.class, required = true, description = "the ID of the Routing firewall rule") + private Long id; + + @Parameter(name = ApiConstants.FOR_DISPLAY, type = CommandType.BOOLEAN, description = "an optional field, whether to display the Routing firewall rule to the end user or not", + authorized = {RoleType.Admin}) + private Boolean display; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + @Override + public boolean isDisplay() { + if (display != null) { 
return display; + } else { + return true; + } + } + + public Long getId() { + return id; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + FirewallRule rule = _firewallService.getFirewallRule(id); + if (rule != null) { + return rule.getAccountId(); + } + Account caller = CallContext.current().getCallingAccount(); + return caller.getAccountId(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_UPDATE; + } + + @Override + public String getEventDescription() { + return "Updating ipv4 routing firewall rule"; + } + + @Override + public void execute() throws ResourceUnavailableException { + CallContext.current().setEventDetails("Rule Id: " + getId()); + FirewallRule rule = routedIpv4Manager.updateRoutingFirewallRule(this); + FirewallResponse ruleResponse = _responseGenerator.createFirewallResponse(rule); + setResponseObject(ruleResponse); + ruleResponse.setResponseName(getCommandName()); + } + + @Override + public void checkUuid() { + if (this.getCustomId() != null) { + _uuidMgr.checkUuid(this.getCustomId(), FirewallRule.class); + } + } + + @Override + public Long getApiResourceId() { + FirewallRule rule = _firewallService.getFirewallRule(id); + if (rule != null) { + return rule.getNetworkId(); + } + return null; + } + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.Network; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java index 0ea22b38a37..49c6ee605c8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java @@ -34,8 +34,11 @@ import org.apache.cloudstack.context.CallContext; import com.cloud.configuration.ResourceCount; import com.cloud.user.Account; -@APICommand(name = "updateResourceCount", description = "Recalculate and update resource count for an account or domain.", responseObject = ResourceCountResponse.class, - requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "updateResourceCount", + description = "Recalculate and update resource count for an account or domain. " + + "This also executes some cleanup tasks before calculating resource counts.", + responseObject = ResourceCountResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateResourceCountCmd extends BaseCmd { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java index 52afd2b1760..3538a389a6e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java @@ -62,6 +62,7 @@ public class UpdateResourceLimitCmd extends BaseCmd { + "2 - Volume. Number of disk volumes a user can create. " + "3 - Snapshot. Number of snapshots a user can create. " + "4 - Template. Number of templates that a user can register/create. " + + "5 - Project. Number of projects a user can create. " + "6 - Network. Number of guest network a user can create. " + "7 - VPC. Number of VPC a user can create. " + "8 - CPU. Total number of CPU cores a user can use. 
" diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ExtractSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ExtractSnapshotCmd.java new file mode 100644 index 00000000000..3f0f82ea4e3 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ExtractSnapshotCmd.java @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.snapshot; + +import com.cloud.event.EventTypes; +import com.cloud.storage.Snapshot; +import com.cloud.user.Account; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.ACL; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ExtractResponse; +import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; + +@APICommand(name = "extractSnapshot", description = "Returns a download URL for extracting a snapshot. It must be in the Backed Up state.", since = "4.20.0", + responseObject = ExtractResponse.class, entityType = {Snapshot.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class ExtractSnapshotCmd extends BaseAsyncCmd { + + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @ACL(accessType = AccessType.OperateEntry) + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=SnapshotResponse.class, required=true, since="4.20.0", description="the ID of the snapshot") + private Long id; + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true, since="4.20.0", + description = "the ID of the zone where the snapshot is located") + private Long zoneId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + 
public Long getId() { + return id; + } + + public Long getZoneId() { + return zoneId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.Snapshot; + } + + @Override + public Long getApiResourceId() { + return getId(); + } + + /** + * @return ID of the snapshot to extract, if any. Otherwise returns the ACCOUNT_ID_SYSTEM, so ERROR events will be traceable. + */ + @Override + public long getEntityOwnerId() { + Snapshot snapshot = _entityMgr.findById(Snapshot.class, getId()); + if (snapshot != null) { + return snapshot.getAccountId(); + } + + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_SNAPSHOT_EXTRACT; + } + + @Override + public String getEventDescription() { + return "Snapshot extraction job"; + } + + @Override + public void execute() { + CallContext.current().setEventDetails("Snapshot ID: " + this._uuidMgr.getUuid(Snapshot.class, getId())); + String uploadUrl = _snapshotService.extractSnapshot(this); + logger.info("Extract URL [{}] of snapshot [{}].", uploadUrl, id); + if (uploadUrl != null) { + ExtractResponse response = _responseGenerator.createSnapshotExtractResponse(id, zoneId, getEntityOwnerId(), uploadUrl); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to extract snapshot"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ChangeSharedFSDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ChangeSharedFSDiskOfferingCmd.java new file mode 100644 index 00000000000..b078ce4aae9 --- /dev/null +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ChangeSharedFSDiskOfferingCmd.java @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import com.cloud.event.EventTypes; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.user.Account; + +@APICommand(name = "changeSharedFileSystemDiskOffering", + responseObject= 
SharedFSResponse.class, + description = "Change Disk offering of a Shared FileSystem", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class ChangeSharedFSDiskOfferingCmd extends BaseAsyncCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + required = true, + entityType = SharedFSResponse.class, + description = "the ID of the shared filesystem") + private Long id; + + @Parameter(name = ApiConstants.DISK_OFFERING_ID, + type = CommandType.UUID, + entityType = DiskOfferingResponse.class, + description = "the disk offering to use for the underlying storage") + private Long diskOfferingId; + + @Parameter(name = ApiConstants.SIZE, + type = CommandType.LONG, + description = "the size of the shared filesystem in GiB") + private Long size; + + @Parameter(name = ApiConstants.MIN_IOPS, + type = CommandType.LONG, + description = "min iops") + private Long minIops; + + @Parameter(name = ApiConstants.MAX_IOPS, + type = CommandType.LONG, + description = "max iops") + private Long maxIops; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getSize() { + return size; + } + + public Long getDiskOfferingId() { + return diskOfferingId; + } + + public Long getMinIops() { + return minIops; + } + + public Long getMaxIops() { + return maxIops; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + 
///////////////////////////////////////////////////// + + @Override + public String getEventType() { + return EventTypes.EVENT_SHAREDFS_CHANGE_DISK_OFFERING; + } + + @Override + public String getEventDescription() { + return "Changing disk offering for the Shared FileSystem " + id; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public void execute() throws ResourceAllocationException { + SharedFS sharedFS = sharedFSService.changeSharedFSDiskOffering(this); + if (sharedFS != null) { + ResponseObject.ResponseView respView = getResponseView(); + Account caller = CallContext.current().getCallingAccount(); + if (_accountService.isRootAdmin(caller.getId())) { + respView = ResponseObject.ResponseView.Full; + } + SharedFSResponse response = _responseGenerator.createSharedFSResponse(respView, sharedFS); + response.setObjectName(SharedFS.class.getSimpleName().toLowerCase()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to change disk offering for the Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ChangeSharedFSServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ChangeSharedFSServiceOfferingCmd.java new file mode 100644 index 00000000000..70fb543d64c --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ChangeSharedFSServiceOfferingCmd.java @@ -0,0 +1,147 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.VirtualMachineMigrationException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "changeSharedFileSystemServiceOffering", + responseObject= SharedFSResponse.class, + description = "Change Service offering of a 
Shared FileSystem", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class ChangeSharedFSServiceOfferingCmd extends BaseAsyncCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + required = true, + entityType = SharedFSResponse.class, + description = "the ID of the shared filesystem") + private Long id; + + @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, + type = CommandType.UUID, + entityType = ServiceOfferingResponse.class, + required = true, + description = "the offering to use for the shared filesystem instance") + private Long serviceOfferingId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getServiceOfferingId() { + return serviceOfferingId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getEventType() { + return EventTypes.EVENT_SHAREDFS_CHANGE_SERVICE_OFFERING; + } + + @Override + public String getEventDescription() { + return "Changing service offering for the Shared FileSystem " + id; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + private String getExceptionMsg(Exception ex) { + return "Shared FileSystem restart failed with exception" + ex.getMessage(); + } + + @Override + public void execute() { + 
SharedFS sharedFS; + try { + sharedFS = sharedFSService.changeSharedFSServiceOffering(this); + } catch (ResourceUnavailableException ex) { + logger.warn("Shared FileSystem change service offering exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, getExceptionMsg(ex)); + } catch (InsufficientCapacityException ex) { + logger.warn("Shared FileSystem change service offering exception: ", ex); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, getExceptionMsg(ex)); + } catch (OperationTimedoutException ex) { + logger.warn("Shared FileSystem change service offering exception: ", ex); + throw new CloudRuntimeException("Shared FileSystem change service offering timed out due to " + ex.getMessage()); + } catch (ManagementServerException ex) { + logger.warn("Shared FileSystem change service offering exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } catch (VirtualMachineMigrationException ex) { + logger.warn("Shared FileSystem change service offering exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + if (sharedFS != null) { + ResponseObject.ResponseView respView = getResponseView(); + Account caller = CallContext.current().getCallingAccount(); + if (_accountService.isRootAdmin(caller.getId())) { + respView = ResponseObject.ResponseView.Full; + } + SharedFSResponse response = _responseGenerator.createSharedFSResponse(respView, sharedFS); + response.setObjectName(SharedFS.class.getSimpleName().toLowerCase()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to change the service offering for the Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/CreateSharedFSCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/CreateSharedFSCmd.java new file mode 100644 index 00000000000..ddaa31612a8 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/CreateSharedFSCmd.java @@ -0,0 +1,304 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import javax.inject.Inject; + +import com.cloud.event.EventTypes; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.DomainResponse; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.ProjectResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSProvider; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +@APICommand(name = "createSharedFileSystem", + responseObject= SharedFSResponse.class, + description = "Create a new Shared File System of specified size and disk offering, attached to the given network", + responseView = ResponseObject.ResponseView.Restricted, + 
entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class CreateSharedFSCmd extends BaseAsyncCreateCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.NAME, + type = CommandType.STRING, + required = true, + description = "the name of the shared filesystem.") + private String name; + + @Parameter(name = ApiConstants.ACCOUNT, + type = BaseCmd.CommandType.STRING, + description = "the account associated with the shared filesystem. Must be used with the domainId parameter.") + private String accountName; + + @Parameter(name = ApiConstants.DOMAIN_ID, + type = CommandType.UUID, + entityType = DomainResponse.class, + description = "the domain ID associated with the shared filesystem. If used with the account parameter" + + " returns the shared filesystem associated with the account for the specified domain." + + "If account is NOT provided then the shared filesystem will be assigned to the caller account and domain.") + private Long domainId; + + @Parameter(name = ApiConstants.PROJECT_ID, + type = CommandType.UUID, + entityType = ProjectResponse.class, + description = "the project associated with the shared filesystem. 
Mutually exclusive with account parameter") + private Long projectId; + + @Parameter(name = ApiConstants.DESCRIPTION, + type = CommandType.STRING, + description = "the description for the shared filesystem.") + private String description; + + @Parameter(name = ApiConstants.SIZE, + type = CommandType.LONG, + description = "the size of the shared filesystem in GiB") + private Long size; + + @Parameter(name = ApiConstants.ZONE_ID, + type = CommandType.UUID, + required = true, + entityType = ZoneResponse.class, + description = "the zone id.") + private Long zoneId; + + @Parameter(name = ApiConstants.DISK_OFFERING_ID, + type = CommandType.UUID, + required = true, + entityType = DiskOfferingResponse.class, + description = "the disk offering to use for the underlying storage. This will define the size and other specifications like encryption and qos for the shared filesystem.") + private Long diskOfferingId; + + @Parameter(name = ApiConstants.MIN_IOPS, + type = CommandType.LONG, + description = "min iops") + private Long minIops; + + @Parameter(name = ApiConstants.MAX_IOPS, + type = CommandType.LONG, + description = "max iops") + private Long maxIops; + + @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, + type = CommandType.UUID, + required = true, + entityType = ServiceOfferingResponse.class, + description = "the service offering to use for the shared filesystem instance hosting the data. The offering should be HA enabled and the cpu count and memory size should be greater than equal to sharedfsvm.min.cpu.count and sharedfsvm.min.ram.size respectively") + private Long serviceOfferingId; + + @Parameter(name = ApiConstants.FILESYSTEM, + type = CommandType.STRING, + required = true, + description = "the filesystem format (XFS / EXT4) which will be installed on the shared filesystem.") + private String fsFormat; + + @Parameter(name = ApiConstants.PROVIDER, + type = CommandType.STRING, + description = "the provider to be used for the shared filesystem. 
The list of providers can be fetched by using the listSharedFileSystemProviders API.") + private String sharedFSProviderName; + + @Parameter(name = ApiConstants.NETWORK_ID, + type = CommandType.UUID, + required = true, + entityType = NetworkResponse.class, + description = "network to attach the shared filesystem to") + private Long networkId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getName() { + return name; + } + + + public Long getProjectId() { + return projectId; + } + + public Long getDomainId() { + return domainId; + } + + public String getAccountName() { + return accountName; + } + public String getDescription() { + return description; + } + + public Long getSize() { + return size; + } + + public Long getZoneId() { + return zoneId; + } + + public Long getDiskOfferingId() { + return diskOfferingId; + } + + public Long getServiceOfferingId() { + return serviceOfferingId; + } + + public Long getMaxIops() { + return maxIops; + } + + public Long getMinIops() { + return minIops; + } + + public String getFsFormat() { + return fsFormat; + } + + public Long getNetworkId() { + return networkId; + } + + public String getSharedFSProviderName() { + if (sharedFSProviderName != null) { + return sharedFSProviderName; + } else { + return SharedFSProvider.SharedFSProviderType.SHAREDFSVM.toString(); + } + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public ApiCommandResourceType getApiResourceType() { + return ApiCommandResourceType.SharedFS; + } + + @Override + public Long getApiResourceId() { + return this.getEntityId(); + } + @Override + public long getEntityOwnerId() { + Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + if (accountId == null) { 
+ return CallContext.current().getCallingAccount().getId(); + } + return accountId; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_SHAREDFS_CREATE; + } + + @Override + public String getEventDescription() { + return "Creating shared filesystem " + name; + } + + private String getCreateExceptionMsg(Exception ex) { + return "Shared FileSystem create failed with exception" + ex.getMessage(); + } + + private String getStartExceptionMsg(Exception ex) { + return "Shared FileSystem start failed with exception: " + ex.getMessage(); + } + + public void create() { + SharedFS sharedFS; + sharedFS = sharedFSService.allocSharedFS(this); + if (sharedFS != null) { + setEntityId(sharedFS.getId()); + setEntityUuid(sharedFS.getUuid()); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create Shared FileSystem"); + } + } + + @Override + public void execute() { + SharedFS sharedFS; + try { + sharedFS = sharedFSService.deploySharedFS(this); + } catch (ResourceUnavailableException ex) { + logger.warn("Shared FileSystem start exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, getStartExceptionMsg(ex)); + } catch (ConcurrentOperationException ex) { + logger.warn("Shared FileSystem start exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, getStartExceptionMsg(ex)); + } catch (InsufficientCapacityException ex) { + logger.warn("Shared FileSystem start exception: ", ex); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, getStartExceptionMsg(ex)); + } catch (ResourceAllocationException ex) { + logger.warn("Shared FileSystem start exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); + } catch (OperationTimedoutException ex) { + throw new CloudRuntimeException("Shared FileSystem start timed out due to " + ex.getMessage()); + } + + if (sharedFS != null) { + ResponseObject.ResponseView respView 
= getResponseView(); + Account caller = CallContext.current().getCallingAccount(); + if (_accountService.isRootAdmin(caller.getId())) { + respView = ResponseObject.ResponseView.Full; + } + SharedFSResponse response = _responseGenerator.createSharedFSResponse(respView, sharedFS); + response.setObjectName(SharedFS.class.getSimpleName().toLowerCase()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/DestroySharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/DestroySharedFSCmd.java new file mode 100644 index 00000000000..09fae53f128 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/DestroySharedFSCmd.java @@ -0,0 +1,116 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import javax.inject.Inject; + +import com.cloud.event.EventTypes; + +@APICommand(name = "destroySharedFileSystem", + responseObject= SuccessResponse.class, + description = "Destroy a Shared FileSystem by id", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class DestroySharedFSCmd extends BaseAsyncCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + entityType = SharedFSResponse.class, + description = "the ID of the shared filesystem to delete") + private Long id; + + @Parameter(name = ApiConstants.EXPUNGE, + type = CommandType.BOOLEAN, + description = "If true is passed, the shared filesystem is expunged immediately. 
False by default.") + private Boolean expunge; + + @Parameter(name = ApiConstants.FORCED, + type = CommandType.BOOLEAN, + description = "If true is passed, the shared filesystem can be destroyed without stopping it first.") + private Boolean forced; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public boolean isExpunge() { + return (expunge != null) ? expunge : false; + } + + public boolean isForced() { + return (forced != null) ? forced : false; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getEventType() { + return EventTypes.EVENT_SHAREDFS_DESTROY; + } + + @Override + public String getEventDescription() { + return "Destroying Shared FileSystem " + id; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public void execute() { + Boolean result = sharedFSService.destroySharedFS(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to destroy Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ExpungeSharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ExpungeSharedFSCmd.java new file mode 100644 index 00000000000..39b99218b66 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ExpungeSharedFSCmd.java @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import com.cloud.event.EventTypes; + +@APICommand(name = "expungeSharedFileSystem", + responseObject= SuccessResponse.class, + description = "Expunge a Shared FileSystem by id", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class ExpungeSharedFSCmd extends 
BaseAsyncCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = SharedFSResponse.class, description = "the ID of the shared filesystem to expunge") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getEventType() { + return EventTypes.EVENT_SHAREDFS_EXPUNGE; + } + + @Override + public String getEventDescription() { + return "Expunging Shared FileSystem " + id; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public void execute() { + try { + sharedFSService.deleteSharedFS(id); + } catch (Exception ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to expunge Shared FileSystem"); + } finally { + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ListSharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ListSharedFSCmd.java new file mode 100644 index 00000000000..c52c691ac0b --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ListSharedFSCmd.java @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListRetrieveOnlyResourceCountCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import javax.inject.Inject; + +@APICommand(name = "listSharedFileSystems", + responseObject= SharedFSResponse.class, + description = "List Shared FileSystems", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, 
RoleType.DomainAdmin, RoleType.User}) +public class ListSharedFSCmd extends BaseListRetrieveOnlyResourceCountCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = SharedFSResponse.class, description = "the ID of the shared filesystem") + private Long id; + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "the name of the shared filesystem") + private String name; + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "the ID of the availability zone") + private Long zoneId; + + @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class, description = "the ID of the network") + private Long networkId; + + @Parameter(name = ApiConstants.DISK_OFFERING_ID, type = CommandType.UUID, entityType = DiskOfferingResponse.class, description = "the disk offering of the shared filesystem") + private Long diskOfferingId; + + @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, description = "the service offering of the shared filesystem") + private Long serviceOfferingId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public String getName() { + return name; + } + + public Long getZoneId() { + return zoneId; + } + + public Long getNetworkId() { + return networkId; + } + + public Long getDiskOfferingId() { + return diskOfferingId; + } + + public Long getServiceOfferingId() { + return serviceOfferingId; + } + + 
///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + public long getEntityOwnerId() { + return 0; + } + + @Override + public void execute() { + ListResponse response = sharedFSService.searchForSharedFS(getResponseView(), this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ListSharedFSProvidersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ListSharedFSProvidersCmd.java new file mode 100644 index 00000000000..940e07225cf --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/ListSharedFSProvidersCmd.java @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.response.SharedFSProviderResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.storage.sharedfs.SharedFSProvider; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +@APICommand(name = "listSharedFileSystemProviders", + responseObject = SharedFSProviderResponse.class, + description = "Lists all available shared filesystem providers.", + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class ListSharedFSProvidersCmd extends BaseListCmd { + + @Inject + public SharedFSService sharedFSService; + + @Override + public void execute() { + List sharedFSProviders = sharedFSService.getSharedFSProviders(); + final ListResponse response = new ListResponse<>(); + final List responses = new ArrayList<>(); + + for (SharedFSProvider sharedFSProvider : sharedFSProviders) { + SharedFSProviderResponse sharedFSProviderResponse = new SharedFSProviderResponse(); + sharedFSProviderResponse.setName(sharedFSProvider.getName()); + sharedFSProviderResponse.setObjectName("sharedfilesystemprovider"); + responses.add(sharedFSProviderResponse); + } + response.setResponses(responses, responses.size()); + response.setResponseName(this.getCommandName()); + setResponseObject(response); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/RecoverSharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/RecoverSharedFSCmd.java new file mode 100644 index 00000000000..6e5bbaa4d8a --- /dev/null +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/RecoverSharedFSCmd.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +@APICommand(name = "recoverSharedFileSystem", + responseObject= SuccessResponse.class, + description = "Recover a Shared FileSystem by id", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + 
requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class RecoverSharedFSCmd extends BaseCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = SharedFSResponse.class, description = "the ID of the shared filesystem to recover") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public void execute() { + SharedFS sharedFS = sharedFSService.recoverSharedFS(id); + if (sharedFS != null) { + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to recover Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/RestartSharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/RestartSharedFSCmd.java new file mode 100644 index 00000000000..576c472b6eb --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/RestartSharedFSCmd.java @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import com.cloud.event.EventTypes; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "restartSharedFileSystem", 
+ responseObject= SuccessResponse.class, + description = "Restart a Shared FileSystem", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class RestartSharedFSCmd extends BaseAsyncCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + required = true, + entityType = SharedFSResponse.class, + description = "the ID of the shared filesystem") + private Long id; + + @Parameter(name = ApiConstants.CLEANUP, + type = CommandType.BOOLEAN, + description = "is cleanup required") + private boolean cleanup; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Boolean getCleanup() { + return cleanup; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getEventType() { + return EventTypes.EVENT_SHAREDFS_RESTART; + } + + @Override + public String getEventDescription() { + return "Restarting Shared FileSystem " + id; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + private String getRestartExceptionMsg(Exception ex) { + return "Shared FileSystem restart failed with exception: " + ex.getMessage(); + } + + @Override + public void execute() { + SharedFS sharedFS; + try { + sharedFS = sharedFSService.restartSharedFS(this.getId(), this.getCleanup()); + }
catch (ResourceUnavailableException ex) { + logger.warn("Shared FileSystem restart exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, getRestartExceptionMsg(ex)); + } catch (ConcurrentOperationException ex) { + logger.warn("Shared FileSystem restart exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, getRestartExceptionMsg(ex)); + } catch (InsufficientCapacityException ex) { + logger.warn("Shared FileSystem restart exception: ", ex); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, getRestartExceptionMsg(ex)); + } catch (ResourceAllocationException ex) { + logger.warn("Shared FileSystem restart exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); + } catch (OperationTimedoutException ex) { + logger.warn("Shared FileSystem restart exception: ", ex); + throw new CloudRuntimeException("Shared FileSystem restart timed out due to " + ex.getMessage()); + } + + if (sharedFS != null) { + ResponseObject.ResponseView respView = getResponseView(); + Account caller = CallContext.current().getCallingAccount(); + if (_accountService.isRootAdmin(caller.getId())) { + respView = ResponseObject.ResponseView.Full; + } + SharedFSResponse response = _responseGenerator.createSharedFSResponse(respView, sharedFS); + response.setObjectName(SharedFS.class.getSimpleName().toLowerCase()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to restart Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/StartSharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/StartSharedFSCmd.java new file mode 100644 index 00000000000..bd384aceef7 --- /dev/null +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/StartSharedFSCmd.java @@ -0,0 +1,135 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import com.cloud.event.EventTypes; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; +import 
com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "startSharedFileSystem", + responseObject= SharedFSResponse.class, + description = "Start a Shared FileSystem", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class StartSharedFSCmd extends BaseAsyncCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + required = true, + entityType = SharedFSResponse.class, + description = "the ID of the shared filesystem") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public String getEventDescription() { + return "Starting Shared FileSystem " + id; + } + + @Override + public String getEventType() { + return EventTypes.EVENT_SHAREDFS_START; + } + + private String getStartExceptionMsg(Exception ex) { + return "Shared FileSystem start failed with exception: " + ex.getMessage(); + } + + @Override + public void execute() { + SharedFS sharedFS; + try { + sharedFS = sharedFSService.startSharedFS(this.getId()); + } catch (ResourceUnavailableException ex) { + 
logger.warn("Shared FileSystem start exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, getStartExceptionMsg(ex)); + } catch (ConcurrentOperationException ex) { + logger.warn("Shared FileSystem start exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, getStartExceptionMsg(ex)); + } catch (InsufficientCapacityException ex) { + logger.warn("Shared FileSystem start exception: ", ex); + throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, getStartExceptionMsg(ex)); + } catch (ResourceAllocationException ex) { + logger.warn("Shared FileSystem start exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); + } catch (OperationTimedoutException ex) { + logger.warn("Shared FileSystem start exception: ", ex); + throw new CloudRuntimeException("Shared FileSystem start timed out due to " + ex.getMessage()); + } + + if (sharedFS != null) { + ResponseObject.ResponseView respView = getResponseView(); + Account caller = CallContext.current().getCallingAccount(); + if (_accountService.isRootAdmin(caller.getId())) { + respView = ResponseObject.ResponseView.Full; + } + SharedFSResponse response = _responseGenerator.createSharedFSResponse(respView, sharedFS); + response.setObjectName(SharedFS.class.getSimpleName().toLowerCase()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/StopSharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/StopSharedFSCmd.java new file mode 100644 index 00000000000..d6e0737144a --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/StopSharedFSCmd.java @@ -0,0 +1,115 @@ +// Licensed to the Apache Software 
Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import com.cloud.event.EventTypes; +import com.cloud.user.Account; + +@APICommand(name = "stopSharedFileSystem", + responseObject= SharedFSResponse.class, + description = "Stop a Shared FileSystem", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class 
StopSharedFSCmd extends BaseAsyncCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + required = true, + entityType = SharedFSResponse.class, + description = "the ID of the shared filesystem") + private Long id; + + @Parameter(name = ApiConstants.FORCED, + type = CommandType.BOOLEAN, + description = "Force stop the shared filesystem.") + private Boolean forced; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public boolean isForced() { + return (forced != null) ? forced : false; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_SHAREDFS_STOP; + } + + @Override + public String getEventDescription() { + return "Stopping Shared FileSystem " + id; + } + + @Override + public void execute() { + SharedFS sharedFS = sharedFSService.stopSharedFS(this.getId(), this.isForced()); + if (sharedFS != null) { + ResponseObject.ResponseView respView = getResponseView(); + Account caller = CallContext.current().getCallingAccount(); + if (_accountService.isRootAdmin(caller.getId())) { + respView = ResponseObject.ResponseView.Full; + } + SharedFSResponse response = _responseGenerator.createSharedFSResponse(respView, sharedFS); + response.setObjectName(SharedFS.class.getSimpleName().toLowerCase()); + response.setResponseName(getCommandName()); + 
setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to stop Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/UpdateSharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/UpdateSharedFSCmd.java new file mode 100644 index 00000000000..daad6cc78c5 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/UpdateSharedFSCmd.java @@ -0,0 +1,113 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.storage.sharedfs; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.UserCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSService; + +import javax.inject.Inject; + +import com.cloud.user.Account; + +@APICommand(name = "updateSharedFileSystem", + responseObject= SharedFSResponse.class, + description = "Update a Shared FileSystem", + responseView = ResponseObject.ResponseView.Restricted, + entityType = SharedFS.class, + requestHasSensitiveInfo = false, + since = "4.20.0", + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) +public class UpdateSharedFSCmd extends BaseCmd implements UserCmd { + + @Inject + SharedFSService sharedFSService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, + type = CommandType.UUID, + required = true, + entityType = SharedFSResponse.class, + description = "the ID of the shared filesystem") + private Long id; + + @Parameter(name = ApiConstants.NAME, + type = CommandType.STRING, + description = "the name of the shared filesystem.") + private String name; + + @Parameter(name = ApiConstants.DESCRIPTION, + type = CommandType.STRING, + description = "the description for the shared filesystem.") + private String description; + + 
///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccount().getId(); + } + + @Override + public void execute() { + SharedFS sharedFS = sharedFSService.updateSharedFS(this); + if (sharedFS != null) { + ResponseObject.ResponseView respView = getResponseView(); + Account caller = CallContext.current().getCallingAccount(); + if (_accountService.isRootAdmin(caller.getId())) { + respView = ResponseObject.ResponseView.Full; + } + SharedFSResponse response = _responseGenerator.createSharedFSResponse(respView, sharedFS); + response.setObjectName(SharedFS.class.getSimpleName().toLowerCase()); + response.setResponseName(getCommandName()); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update Shared FileSystem"); + } + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/DeleteTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/DeleteTemplateCmd.java index 245baf1e07e..5e9bf317fe1 100755 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/DeleteTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/DeleteTemplateCmd.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.command.user.template; import org.apache.commons.lang3.BooleanUtils; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import 
org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; @@ -53,6 +54,9 @@ public class DeleteTemplateCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.FORCED, type = CommandType.BOOLEAN, required = false, description = "Force delete a template.", since = "4.9+") private Boolean forced; + @Parameter(name = ApiConstants.IS_SYSTEM, type = CommandType.BOOLEAN, required = false, description = "Necessary if the template's type is system.", since = "4.20.0", authorized = {RoleType.Admin}) + private Boolean isSystem; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -69,6 +73,10 @@ public class DeleteTemplateCmd extends BaseAsyncCmd { return BooleanUtils.toBooleanDefaultIfNull(forced, false); } + public boolean getIsSystem() { + return BooleanUtils.toBooleanDefaultIfNull(isSystem, false); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ExtractTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ExtractTemplateCmd.java index ce6ba5e300c..0fa0679bfd9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ExtractTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ExtractTemplateCmd.java @@ -120,8 +120,9 @@ public class ExtractTemplateCmd extends BaseAsyncCmd { CallContext.current().setEventDetails(getEventDescription()); String uploadUrl = _templateService.extract(this); if (uploadUrl != null) { - ExtractResponse response = _responseGenerator.createExtractResponse(id, zoneId, getEntityOwnerId(), mode, uploadUrl); + ExtractResponse response = _responseGenerator.createImageExtractResponse(id, zoneId, getEntityOwnerId(), mode, 
uploadUrl); response.setResponseName(getCommandName()); + response.setObjectName("template"); this.setResponseObject(response); } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to extract template"); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java index eef5c74bc40..330224a6055 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java @@ -22,6 +22,7 @@ import java.net.MalformedURLException; import java.util.Collection; import java.util.Map; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -55,6 +56,11 @@ public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd { description = "the ID of the OS Type that best represents the OS of this template. Not required for VMware as the guest OS is obtained from the OVF file.") private Long osTypeId; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, + description = "the CPU arch of the template. Valid options are: x86_64, aarch64", + since = "4.20") + private String arch; + @Parameter(name = ApiConstants.BITS, type = CommandType.INTEGER, description = "32 or 64 bits support. 
64 by default") private Integer bits; @@ -95,7 +101,7 @@ public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd { @Parameter(name=ApiConstants.FOR_CKS, type = CommandType.BOOLEAN, - description = "if true, the templates would be available for deploying CKS clusters", since = "4.20.0") + description = "if true, the templates would be available for deploying CKS clusters", since = "4.21.0") protected Boolean forCks; public String getDisplayText() { @@ -171,6 +177,10 @@ public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd { return Boolean.TRUE.equals(forCks); } + public CPU.CPUArch getArch() { + return CPU.CPUArch.fromType(arch); + } + @Override public void execute() throws ServerApiException { validateRequest(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java index e9fdc2d1427..8bd0c05401f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java @@ -16,6 +16,7 @@ // under the License. 
package org.apache.cloudstack.api.command.user.template; +import com.cloud.cpu.CPU; import com.cloud.exception.InvalidParameterValueException; import com.cloud.server.ResourceIcon; import com.cloud.server.ResourceTag; @@ -41,6 +42,7 @@ import org.apache.cloudstack.context.CallContext; import com.cloud.template.VirtualMachineTemplate; import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; +import org.apache.commons.lang3.StringUtils; @APICommand(name = "listTemplates", description = "List all public, private, and privileged templates.", responseObject = TemplateResponse.class, entityType = {VirtualMachineTemplate.class}, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) @@ -106,9 +108,14 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User @Parameter(name = ApiConstants.FOR_CKS, type = CommandType.BOOLEAN, description = "list templates that can be used to deploy CKS clusters", - since = "4.20.0") + since = "4.21.0") private Boolean forCks; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, + description = "the CPU arch of the template. 
Valid options are: x86_64, aarch64", + since = "4.20") + private String arch; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -198,6 +205,13 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User public Boolean getForCks() { return forCks; } + public CPU.CPUArch getArch() { + if (StringUtils.isBlank(arch)) { + return null; + } + return CPU.CPUArch.fromType(arch); + } + @Override public String getCommandName() { return s_name; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java index d1192dbc608..6dc51272fad 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java @@ -16,6 +16,7 @@ // under the License. package org.apache.cloudstack.api.command.user.template; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor; import java.net.URISyntaxException; import java.util.ArrayList; @@ -177,6 +178,11 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd { since = "4.19.0") private String templateType; + @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, + description = "the CPU arch of the template. 
Valid options are: x86_64, aarch64", + since = "4.20") + private String arch; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -302,6 +308,10 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd { return templateType; } + public CPU.CPUArch getArch() { + return CPU.CPUArch.fromType(arch); + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java index 8df25541a19..41d865d678c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java @@ -72,7 +72,14 @@ public class RegisterUserDataCmd extends BaseCmd { @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "an optional project for the userdata") private Long projectId; - @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, required = true, description = "Userdata content", length = 1048576) + @Parameter(name = ApiConstants.USER_DATA, + type = CommandType.STRING, + required = true, + description = "Base64 encoded userdata content. " + + "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + + "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. 
" + + "You also need to change vm.userdata.max.length value", + length = 1048576) private String userData; @Parameter(name = ApiConstants.PARAMS, type = CommandType.STRING, description = "comma separated list of variables declared in userdata content") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java index 0dc3dcdbdcc..e76a75ae398 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java @@ -79,7 +79,7 @@ public class AddIpToVmNicCmd extends BaseAsyncCreateCmd { private boolean isZoneSGEnabled() { Network ntwk = _entityMgr.findById(Network.class, getNetworkId()); DataCenter dc = _entityMgr.findById(DataCenter.class, ntwk.getDataCenterId()); - return dc.isSecurityGroupEnabled(); + return dc.isSecurityGroupEnabled() || _ntwkModel.isSecurityGroupSupportedForZone(dc.getId()); } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMScheduleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMScheduleCmd.java index 935f39bf4dd..5811eb1abfc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMScheduleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMScheduleCmd.java @@ -68,7 +68,7 @@ public class CreateVMScheduleCmd extends BaseCmd { @Parameter(name = ApiConstants.ACTION, type = CommandType.STRING, required = true, - description = "Action to take on the VM (start/stop/restart/force_stop/force_reboot).") + description = "Action to take on the VM (start/stop/reboot/force_stop/force_reboot).") private String action; @Parameter(name = ApiConstants.START_DATE, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 446bdf30f07..52d42a95d98 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiArgValidator; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy; @@ -97,7 +98,7 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG @Parameter(name = ApiConstants.TEMPLATE_ID, type = CommandType.UUID, entityType = TemplateResponse.class, required = true, description = "the ID of the template for the virtual machine") private Long templateId; - @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "host name for the virtual machine") + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "host name for the virtual machine", validations = {ApiArgValidator.RFCComplianceDomainName}) private String name; @Parameter(name = ApiConstants.DISPLAY_NAME, type = CommandType.STRING, description = "an optional user generated name for the virtual machine") @@ -154,7 +155,11 @@ public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityG private String hypervisor; @Parameter(name = ApiConstants.USER_DATA, type = CommandType.STRING, - description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. This binary data must be base64 encoded before adding it to the request. Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. 
Using HTTP POST(via POST body), you can send up to 1MB of data after base64 encoding.", + description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. " + + "This binary data must be base64 encoded before adding it to the request. " + + "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + + "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. " + + "You also need to change vm.userdata.max.length value", length = 1048576) private String userData; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/GetVMPasswordCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/GetVMPasswordCmd.java index ce6114c7fd8..11534fd4375 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/GetVMPasswordCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/GetVMPasswordCmd.java @@ -16,9 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; -import java.security.InvalidParameterException; - - import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.APICommand; @@ -28,6 +25,7 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.GetVMPasswordResponse; import org.apache.cloudstack.api.response.UserVmResponse; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.user.Account; import com.cloud.uservm.UserVm; import com.cloud.vm.VirtualMachine; @@ -61,7 +59,7 @@ public class GetVMPasswordCmd extends BaseCmd { public void execute() { String passwd = _mgr.getVMPassword(this); if (passwd == null || passwd.equals("")) - throw new InvalidParameterException("No password for VM with id '" + getId() + "' found."); + throw new InvalidParameterValueException("No password for VM with id '" + getId() + "' found."); setResponseObject(new GetVMPasswordResponse(getCommandName(), passwd)); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVMsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVMsCmd.java index 2d1160fb7a7..50e1798112d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVMsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVMsCmd.java @@ -16,9 +16,10 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; -import java.util.ArrayList; import java.util.EnumSet; +import java.util.HashSet; import java.util.List; +import java.util.Set; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -45,6 +46,7 @@ import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.collections.CollectionUtils; import com.cloud.exception.InvalidParameterValueException; import com.cloud.server.ResourceIcon; @@ -56,7 +58,6 @@ import com.cloud.vm.VirtualMachine; requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class ListVMsCmd extends BaseListRetrieveOnlyResourceCountCmd implements UserCmd { - private static final String s_name = "listvirtualmachinesresponse"; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -96,7 +97,8 @@ public class ListVMsCmd extends BaseListRetrieveOnlyResourceCountCmd implements collectionType = CommandType.STRING, description = "comma separated list of vm details requested, " + "value can be a list of [all, group, nics, stats, secgrp, tmpl, servoff, diskoff, backoff, iso, volume, min, affgrp]." 
- + " If no parameter is passed in, the details will be defaulted to all") + + " When no parameters are passed, all the details are returned if list.vm.default.details.stats is true (default)," + + " otherwise when list.vm.default.details.stats is false the API response will exclude the stats details.") private List viewDetails; @Parameter(name = ApiConstants.TEMPLATE_ID, type = CommandType.UUID, entityType = TemplateResponse.class, description = "list vms by template") @@ -237,22 +239,32 @@ public class ListVMsCmd extends BaseListRetrieveOnlyResourceCountCmd implements return autoScaleVmGroupId; } + protected boolean isViewDetailsEmpty() { + return CollectionUtils.isEmpty(viewDetails); + } + public EnumSet getDetails() throws InvalidParameterValueException { - EnumSet dv; - if (viewDetails == null || viewDetails.size() <= 0) { - dv = EnumSet.of(VMDetails.all); - } else { - try { - ArrayList dc = new ArrayList(); - for (String detail : viewDetails) { - dc.add(VMDetails.valueOf(detail)); - } - dv = EnumSet.copyOf(dc); - } catch (IllegalArgumentException e) { - throw new InvalidParameterValueException("The details parameter contains a non permitted value. The allowed values are " + EnumSet.allOf(VMDetails.class)); + if (isViewDetailsEmpty()) { + if (_queryService.ReturnVmStatsOnVmList.value()) { + return EnumSet.of(VMDetails.all); } + + Set allDetails = new HashSet<>(Set.of(VMDetails.values())); + allDetails.remove(VMDetails.stats); + allDetails.remove(VMDetails.all); + return EnumSet.copyOf(allDetails); + } + + try { + Set dc = new HashSet<>(); + for (String detail : viewDetails) { + dc.add(VMDetails.valueOf(detail)); + } + + return EnumSet.copyOf(dc); + } catch (IllegalArgumentException e) { + throw new InvalidParameterValueException("The details parameter contains a non permitted value. 
The allowed values are " + EnumSet.allOf(VMDetails.class)); } - return dv; } @Override @@ -275,10 +287,6 @@ public class ListVMsCmd extends BaseListRetrieveOnlyResourceCountCmd implements ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// - @Override - public String getCommandName() { - return s_name; - } @Override public ApiCommandResourceType getApiResourceType() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVnfAppliancesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVnfAppliancesCmd.java new file mode 100644 index 00000000000..e7bbb97d78c --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVnfAppliancesCmd.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.vm; + +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ResponseObject.ResponseView; +import org.apache.cloudstack.api.command.user.UserCmd; + +import org.apache.cloudstack.api.response.UserVmResponse; + +@APICommand(name = "listVnfAppliances", description = "List VNF appliance owned by the account.", + responseObject = UserVmResponse.class, + responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + since = "4.19.1") +public class ListVnfAppliancesCmd extends ListVMsCmd implements UserCmd { + + @Override + public Boolean getVnf() { + return true; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java index a4cd6159dfc..2f53c3d4e4c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java @@ -127,7 +127,7 @@ public class RemoveIpFromVmNicCmd extends BaseAsyncCmd { private boolean isZoneSGEnabled() { Network ntwk = _entityMgr.findById(Network.class, getNetworkId()); DataCenter dc = _entityMgr.findById(DataCenter.class, ntwk.getDataCenterId()); - return dc.isSecurityGroupEnabled(); + return dc.isSecurityGroupEnabled() || _ntwkModel.isSecurityGroupSupportedForZone(dc.getId()); } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMUserDataCmd.java index 089dfaecf94..0ecf4ff1384 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMUserDataCmd.java @@ -60,7 +60,7 @@ public class ResetVMUserDataCmd extends BaseCmd implements UserCmd { description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. " + "This binary data must be base64 encoded before adding it to the request. " + "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + - "Using HTTP POST(via POST body), you can send up to 1MB of data after base64 encoding." + + "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. " + "You also need to change vm.userdata.max.length value", length = 1048576) private String userData; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StartVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StartVMCmd.java index 8bc4f0ff3b1..c0311d59997 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StartVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StartVMCmd.java @@ -102,6 +102,10 @@ public class StartVMCmd extends BaseAsyncCmd implements UserCmd { return id; } + public void setId(Long id) { + this.id = id; + } + public Long getHostId() { return hostId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java index 9f72ac17c8f..0f5dade96d2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java @@ -22,6 +22,8 @@ import java.util.List; import java.util.Map; import com.cloud.utils.exception.CloudRuntimeException; + +import org.apache.cloudstack.api.ApiArgValidator; import 
org.apache.cloudstack.api.response.UserDataResponse; import org.apache.cloudstack.acl.RoleType; @@ -84,7 +86,7 @@ public class UpdateVMCmd extends BaseCustomIdCmd implements SecurityGroupAction, description = "an optional binary data that can be sent to the virtual machine upon a successful deployment. " + "This binary data must be base64 encoded before adding it to the request. " + "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " + - "Using HTTP POST(via POST body), you can send up to 1MB of data after base64 encoding." + + "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. " + "You also need to change vm.userdata.max.length value", length = 1048576, since = "4.16.0") @@ -104,7 +106,7 @@ public class UpdateVMCmd extends BaseCustomIdCmd implements SecurityGroupAction, description = "true if VM contains XS/VMWare tools inorder to support dynamic scaling of VM cpu/memory. This can be updated only when dynamic scaling is enabled on template, service offering and the corresponding global setting") protected Boolean isDynamicallyScalable; - @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "new host name of the vm. The VM has to be stopped/started for this update to take affect", since = "4.4") + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "new host name of the vm. 
The VM has to be stopped/started for this update to take affect", validations = {ApiArgValidator.RFCComplianceDomainName}, since = "4.4") private String name; @Parameter(name = ApiConstants.INSTANCE_NAME, type = CommandType.STRING, description = "instance name of the user vm", since = "4.4", authorized = {RoleType.Admin}) @@ -141,9 +143,17 @@ public class UpdateVMCmd extends BaseCustomIdCmd implements SecurityGroupAction, + " Example: dhcpoptionsnetworklist[0].dhcp:114=url&dhcpoptionsetworklist[0].networkid=networkid&dhcpoptionsetworklist[0].dhcp:66=www.test.com") private Map dhcpOptionsNetworkList; - @Parameter(name = ApiConstants.EXTRA_CONFIG, type = CommandType.STRING, since = "4.12", description = "an optional URL encoded string that can be passed to the virtual machine upon successful deployment", authorized = { RoleType.Admin }, length = 5120) + @Parameter(name = ApiConstants.EXTRA_CONFIG, type = CommandType.STRING, since = "4.12", description = "an optional URL encoded string that can be passed to the virtual machine upon successful deployment", length = 5120) private String extraConfig; + @Parameter(name = ApiConstants.DELETE_PROTECTION, + type = CommandType.BOOLEAN, since = "4.20.0", + description = "Set delete protection for the virtual machine. If " + + "true, the instance will be protected from deletion. " + + "Note: If the instance is managed by another service like" + + " autoscaling groups or CKS, delete protection will be ignored.") + private Boolean deleteProtection; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -213,6 +223,10 @@ public class UpdateVMCmd extends BaseCustomIdCmd implements SecurityGroupAction, return cleanupDetails == null ? 
false : cleanupDetails.booleanValue(); } + public Boolean getDeleteProtection() { + return deleteProtection; + } + public Map> getDhcpOptionsMap() { Map> dhcpOptionsMap = new HashMap<>(); if (dhcpOptionsNetworkList != null && !dhcpOptionsNetworkList.isEmpty()) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java index 1146f80f0e2..9445aba23c0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java @@ -31,9 +31,7 @@ import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; -import com.cloud.dc.DataCenter; import com.cloud.event.EventTypes; -import com.cloud.storage.Upload; import com.cloud.storage.Volume; import com.cloud.user.Account; @@ -124,20 +122,8 @@ public class ExtractVolumeCmd extends BaseAsyncCmd { CallContext.current().setEventDetails("Volume Id: " + this._uuidMgr.getUuid(Volume.class, getId())); String uploadUrl = _volumeService.extractVolume(this); if (uploadUrl != null) { - ExtractResponse response = new ExtractResponse(); + ExtractResponse response = _responseGenerator.createVolumeExtractResponse(id, zoneId, getEntityOwnerId(), mode, uploadUrl); response.setResponseName(getCommandName()); - response.setObjectName("volume"); - Volume vol = _entityMgr.findById(Volume.class, id); - response.setId(vol.getUuid()); - response.setName(vol.getName()); - DataCenter zone = _entityMgr.findById(DataCenter.class, zoneId); - response.setZoneId(zone.getUuid()); - response.setZoneName(zone.getName()); - response.setMode(mode); - response.setState(Upload.Status.DOWNLOAD_URL_CREATED.toString()); - Account account = _entityMgr.findById(Account.class, getEntityOwnerId()); - 
response.setAccountId(account.getUuid()); - response.setUrl(uploadUrl); setResponseObject(response); } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to extract volume"); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ListVolumesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ListVolumesCmd.java index a583675da76..a1024a98898 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ListVolumesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ListVolumesCmd.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; @@ -80,6 +81,12 @@ public class ListVolumesCmd extends BaseListRetrieveOnlyResourceCountCmd impleme RoleType.Admin}) private String storageId; + @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, + entityType = ServiceOfferingResponse.class, + description = "list volumes by disk offering of a service offering. If both service offering and " + + "disk offering are passed, service offering is ignored", since = "4.19.1") + private Long serviceOfferingId; + @Parameter(name = ApiConstants.DISK_OFFERING_ID, type = CommandType.UUID, entityType = DiskOfferingResponse.class, description = "list volumes by disk offering", since = "4.4") private Long diskOfferingId; @@ -94,6 +101,9 @@ public class ListVolumesCmd extends BaseListRetrieveOnlyResourceCountCmd impleme @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "state of the volume. 
Possible values are: Ready, Allocated, Destroy, Expunging, Expunged.") private String state; + @Parameter(name = ApiConstants.IS_ENCRYPTED, type = CommandType.BOOLEAN, description = "list only volumes that are encrypted", since = "4.19.1", + authorized = { RoleType.Admin }) + private Boolean encrypted; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -118,6 +128,10 @@ public class ListVolumesCmd extends BaseListRetrieveOnlyResourceCountCmd impleme return podId; } + public Long getServiceOfferingId() { + return serviceOfferingId; + } + public Long getDiskOfferingId() { return diskOfferingId; } @@ -151,6 +165,10 @@ public class ListVolumesCmd extends BaseListRetrieveOnlyResourceCountCmd impleme return state; } + public Boolean isEncrypted() { + return encrypted; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java index 9254bad207b..65a3d6a7063 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java @@ -101,6 +101,10 @@ public class ResizeVolumeCmd extends BaseAsyncCmd implements UserCmd { return getEntityId(); } + public void setId(Long id) { + this.id = id; + } + public Long getMinIops() { return minIops; } @@ -113,6 +117,10 @@ public class ResizeVolumeCmd extends BaseAsyncCmd implements UserCmd { return size; } + public void setSize(Long size) { + this.size = size; + } + public boolean isShrinkOk() { return shrinkOk; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java index 467c587cc73..22b819c8cba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java @@ -77,6 +77,14 @@ public class UpdateVolumeCmd extends BaseAsyncCustomIdCmd implements UserCmd { @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "new name of the volume", since = "4.16") private String name; + @Parameter(name = ApiConstants.DELETE_PROTECTION, + type = CommandType.BOOLEAN, since = "4.20.0", + description = "Set delete protection for the volume. If true, The volume " + + "will be protected from deletion. Note: If the volume is managed by " + + "another service like autoscaling groups or CKS, delete protection will be " + + "ignored.") + private Boolean deleteProtection; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -109,6 +117,10 @@ public class UpdateVolumeCmd extends BaseAsyncCustomIdCmd implements UserCmd { return name; } + public Boolean getDeleteProtection() { + return deleteProtection; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -168,7 +180,7 @@ public class UpdateVolumeCmd extends BaseAsyncCustomIdCmd implements UserCmd { public void execute() { CallContext.current().setEventDetails("Volume Id: " + this._uuidMgr.getUuid(Volume.class, getId())); Volume result = _volumeService.updateVolume(getId(), getPath(), getState(), getStorageId(), getDisplayVolume(), - getCustomId(), getEntityOwnerId(), getChainInfo(), getName()); + getDeleteProtection(), getCustomId(), getEntityOwnerId(), getChainInfo(), getName()); if (result != null) { VolumeResponse response = 
_responseGenerator.createVolumeResponse(getResponseView(), result); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java index 94f05f707a0..2f62d0d7210 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java @@ -75,10 +75,15 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd implements UserCmd { private String displayText; - @Parameter(name = ApiConstants.CIDR, type = CommandType.STRING, required = true, description = "the cidr of the VPC. All VPC " + - "guest networks' cidrs should be within this CIDR") + @Parameter(name = ApiConstants.CIDR, type = CommandType.STRING, + description = "the cidr of the VPC. All VPC guest networks' cidrs should be within this CIDR") private String cidr; + @Parameter(name = ApiConstants.CIDR_SIZE, type = CommandType.INTEGER, + description = "the CIDR size of VPC. 
For regular users, this is required for VPC with ROUTED mode.", + since = "4.20.0") + private Integer cidrSize; + @Parameter(name = ApiConstants.VPC_OFF_ID, type = CommandType.UUID, entityType = VpcOfferingResponse.class, required = true, description = "the ID of the VPC offering") private Long vpcOffering; @@ -117,6 +122,9 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd implements UserCmd { since = "4.19") private String sourceNatIP; + @Parameter(name=ApiConstants.AS_NUMBER, type=CommandType.LONG, since = "4.20.0", description="the AS Number of the VPC tiers") + private Long asNumber; + // /////////////////////////////////////////////////// // ///////////////// Accessors /////////////////////// // /////////////////////////////////////////////////// @@ -141,6 +149,10 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd implements UserCmd { return cidr; } + public Integer getCidrSize() { + return cidrSize; + } + public String getDisplayText() { return StringUtils.isEmpty(displayText) ? 
vpcName : displayText; } @@ -189,6 +201,10 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd implements UserCmd { return sourceNatIP; } + public Long getAsNumber() { + return asNumber; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -208,11 +224,7 @@ public class CreateVPCCmd extends BaseAsyncCreateCmd implements UserCmd { public void execute() { Vpc vpc = null; try { - if (isStart()) { - _vpcService.startVpc(getEntityId(), true); - } else { - logger.debug("Not starting VPC as " + ApiConstants.START + "=false was passed to the API"); - } + _vpcService.startVpc(this); vpc = _entityMgr.findById(Vpc.class, getEntityId()); } catch (ResourceUnavailableException ex) { logger.warn("Exception: ", ex); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java index 9e950310cdc..59ba7e94b04 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java @@ -131,6 +131,7 @@ public class AddVpnUserCmd extends BaseAsyncCreateCmd { if (domain != null) { vpnResponse.setDomainId(domain.getUuid()); vpnResponse.setDomainName(domain.getName()); + vpnResponse.setDomainPath(domain.getPath()); } vpnResponse.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ASNRangeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ASNRangeResponse.java new file mode 100644 index 00000000000..86dab54ca6b --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/ASNRangeResponse.java @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import com.cloud.bgp.ASNumberRange; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import java.util.Date; + +@EntityReference(value = ASNumberRange.class) +public class ASNRangeResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "ID of the AS Number Range") + private String id; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "Zone ID") + private String zoneId; + + @SerializedName(ApiConstants.START_ASN) + @Param(description = "Start AS Number") + private Long startASNumber; + + @SerializedName(ApiConstants.END_ASN) + @Param(description = "End AS Number") + private Long endASNumber; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "Created date") + private Date created; + + public ASNRangeResponse() { + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getZoneId() { + return zoneId; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public Long getStartASNumber() { + return 
startASNumber; + } + + public void setStartASNumber(Long startASNumber) { + this.startASNumber = startASNumber; + } + + public Long getEndASNumber() { + return endASNumber; + } + + public void setEndASNumber(Long endASNumber) { + this.endASNumber = endASNumber; + } + + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ASNumberResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ASNumberResponse.java new file mode 100644 index 00000000000..45884250984 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/ASNumberResponse.java @@ -0,0 +1,237 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; + +import com.cloud.bgp.ASNumber; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import java.util.Date; + +@EntityReference(value = ASNumber.class) +public class ASNumberResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "ID of the AS Number") + private String id; + + @SerializedName(ApiConstants.ACCOUNT_ID) + @Param(description = "Account ID") + private String accountId; + + @SerializedName(ApiConstants.ACCOUNT) + @Param(description = "the account name") + private String accountName; + + @SerializedName(ApiConstants.DOMAIN_ID) + @Param(description = "Domain ID") + private String domainId; + + @SerializedName(ApiConstants.DOMAIN) + @Param(description = "the domain name") + private String domainName; + + @SerializedName(ApiConstants.AS_NUMBER) + @Param(description = "AS Number") + private Long asNumber; + + @SerializedName(ApiConstants.ASN_RANGE_ID) + @Param(description = "AS Number ID") + private String asNumberRangeId; + + @SerializedName(ApiConstants.ASN_RANGE) + @Param(description = "AS Number Range") + private String asNumberRange; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "Zone ID") + private String zoneId; + + @SerializedName(ApiConstants.ZONE_NAME) + @Param(description = "the zone name of the AS Number range") + private String zoneName; + + @SerializedName("allocated") + @Param(description = "Allocated Date") + private Date allocated; + + @SerializedName(ApiConstants.ALLOCATION_STATE) + @Param(description = "Allocation state") + private String allocationState; + + @SerializedName(ApiConstants.ASSOCIATED_NETWORK_ID) + @Param(description = "Network ID") + private String associatedNetworkId; + + @SerializedName(ApiConstants.ASSOCIATED_NETWORK_NAME) + 
@Param(description = "Network Name") + private String associatedNetworkName; + + @SerializedName((ApiConstants.VPC_ID)) + @Param(description = "VPC ID") + private String vpcId; + + @SerializedName(ApiConstants.VPC_NAME) + @Param(description = "VPC Name") + private String vpcName; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "Created Date") + private Date created; + + public ASNumberResponse() { + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getAccountId() { + return accountId; + } + + public void setAccountId(String accountId) { + this.accountId = accountId; + } + + public String getAccountName() { + return accountName; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public String getDomainId() { + return domainId; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public String getDomainName() { + return domainName; + } + + public void setDomainName(String domainName) { + this.domainName = domainName; + } + + public Long getAsNumber() { + return asNumber; + } + + public void setAsNumber(Long asNumber) { + this.asNumber = asNumber; + } + + public String getAsNumberRangeId() { + return asNumberRangeId; + } + + public void setAsNumberRangeId(String asNumberRangeId) { + this.asNumberRangeId = asNumberRangeId; + } + + public String getAsNumberRange() { + return asNumberRange; + } + + public void setAsNumberRange(String asNumberRange) { + this.asNumberRange = asNumberRange; + } + + public String getZoneId() { + return zoneId; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public String getZoneName() { + return zoneName; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public Date getAllocated() { + return allocated; + } + + public void setAllocated(Date allocatedDate) { + this.allocated = allocatedDate; + } + + public 
String getAllocationState() { + return allocationState; + } + + public void setAllocationState(String allocated) { + allocationState = allocated; + } + + public String getAssociatedNetworkId() { + return associatedNetworkId; + } + + public void setAssociatedNetworkId(String associatedNetworkId) { + this.associatedNetworkId = associatedNetworkId; + } + + public String getAssociatedNetworkName() { + return associatedNetworkName; + } + + public void setAssociatedNetworkName(String associatedNetworkName) { + this.associatedNetworkName = associatedNetworkName; + } + + public String getVpcId() { + return vpcId; + } + + public void setVpcId(String vpcId) { + this.vpcId = vpcId; + } + + public String getVpcName() { + return vpcName; + } + + public void setVpcName(String vpcName) { + this.vpcName = vpcName; + } + + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/AcquireIPAddressResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/AcquireIPAddressResponse.java index 7270fa949c8..06351a40d9d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/AcquireIPAddressResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/AcquireIPAddressResponse.java @@ -75,6 +75,10 @@ public class AcquireIPAddressResponse extends BaseResponse implements Controlle @Param(description = "the domain the public IP address is associated with") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the public IP address belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.FOR_VIRTUAL_NETWORK) @Param(description = "the virtual network for the IP address") private Boolean forVirtualNetwork; @@ -190,6 +194,11 @@ public class AcquireIPAddressResponse extends BaseResponse implements Controlle this.domainName = domainName; 
} + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setForVirtualNetwork(Boolean forVirtualNetwork) { this.forVirtualNetwork = forVirtualNetwork; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/AcquirePodIpCmdResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/AcquirePodIpCmdResponse.java index 77c4d0d3ffc..2226efd063d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/AcquirePodIpCmdResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/AcquirePodIpCmdResponse.java @@ -44,7 +44,7 @@ public class AcquirePodIpCmdResponse extends BaseResponse { @SerializedName(ApiConstants.NIC_ID) @Param(description = "the ID of the nic") - private Long instanceId; + private Long nicId; @SerializedName(ApiConstants.HOST_MAC) @Param(description = "MAC address of the pod the IP") @@ -58,8 +58,8 @@ public class AcquirePodIpCmdResponse extends BaseResponse { this.ipAddress = ipAddress; } - public void setInstanceId(Long instanceId) { - this.instanceId = instanceId; + public void setNicId(Long nicId) { + this.nicId = nicId; } public void setPodId(long podId) { @@ -82,8 +82,8 @@ public class AcquirePodIpCmdResponse extends BaseResponse { return id; } - public Long getInstanceId() { - return instanceId; + public Long getNicId() { + return nicId; } public long getPodId() { diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ApplicationLoadBalancerResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ApplicationLoadBalancerResponse.java index 53e3f868a3b..0b6485bfc14 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ApplicationLoadBalancerResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ApplicationLoadBalancerResponse.java @@ -76,6 +76,10 @@ public class ApplicationLoadBalancerResponse extends BaseResponse implements Con @Param(description = "the domain of the Load Balancer") private String 
domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the Load Balancer belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName("loadbalancerrule") @Param(description = "the list of rules associated with the Load Balancer", responseObject = ApplicationLoadBalancerRuleResponse.class) private List lbRules; @@ -107,6 +111,11 @@ public class ApplicationLoadBalancerResponse extends BaseResponse implements Con this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/AutoScalePolicyResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/AutoScalePolicyResponse.java index ae3462f1fec..e3f9902dec6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/AutoScalePolicyResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/AutoScalePolicyResponse.java @@ -74,6 +74,10 @@ public class AutoScalePolicyResponse extends BaseResponse implements ControlledE @Param(description = "the domain name of the autoscale policy") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the autoscale policy belongs", since = "4.19.2.0") + private String domainPath; + @Override public String getObjectId() { return this.id; @@ -118,6 +122,11 @@ public class AutoScalePolicyResponse extends BaseResponse implements ControlledE this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/AutoScaleVmGroupResponse.java 
b/api/src/main/java/org/apache/cloudstack/api/response/AutoScaleVmGroupResponse.java index 656a62ddda3..b1389acdecb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/AutoScaleVmGroupResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/AutoScaleVmGroupResponse.java @@ -123,6 +123,10 @@ public class AutoScaleVmGroupResponse extends BaseResponseWithAnnotations implem @Param(description = "the domain name of the vm group") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the vm group belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.FOR_DISPLAY) @Param(description = "is group for display to the regular user", since = "4.4", authorized = {RoleType.Admin}) private Boolean forDisplay; @@ -227,6 +231,11 @@ public class AutoScaleVmGroupResponse extends BaseResponseWithAnnotations implem this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/AutoScaleVmProfileResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/AutoScaleVmProfileResponse.java index 9f238344730..22e4eb2288b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/AutoScaleVmProfileResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/AutoScaleVmProfileResponse.java @@ -69,7 +69,7 @@ public class AutoScaleVmProfileResponse extends BaseResponse implements Controll private Map counterParams; @SerializedName(ApiConstants.USER_DATA) - @Param(description = "Base 64 encoded VM user data") + @Param(description = "Base64 encoded VM user data") private String userData; @SerializedName(ApiConstants.USER_DATA_ID) @Param(description="the id of userdata used for the VM", since = "4.18.1") @@ -114,6 +114,10 @@ 
public class AutoScaleVmProfileResponse extends BaseResponse implements Controll @Param(description = "the domain name of the vm profile") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the vm profile belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.FOR_DISPLAY) @Param(description = "is profile for display to the regular user", since = "4.4", authorized = {RoleType.Admin}) private Boolean forDisplay; @@ -196,6 +200,10 @@ public class AutoScaleVmProfileResponse extends BaseResponse implements Controll this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupRepositoryResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupRepositoryResponse.java new file mode 100644 index 00000000000..3847176608c --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupRepositoryResponse.java @@ -0,0 +1,154 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.response; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; +import org.apache.cloudstack.backup.BackupRepository; + +import java.util.Date; + +@EntityReference(value = BackupRepository.class) +public class BackupRepositoryResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "the ID of the backup repository") + private String id; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "the Zone ID of the backup repository") + private String zoneId; + + @SerializedName(ApiConstants.ZONE_NAME) + @Param(description = "the Zone name of the backup repository") + private String zoneName; + + @SerializedName(ApiConstants.NAME) + @Param(description = "the name of the backup repository") + private String name; + + @SerializedName(ApiConstants.ADDRESS) + @Param(description = "the address / url of the backup repository") + private String address; + + @SerializedName(ApiConstants.PROVIDER) + @Param(description = "name of the provider") + private String providerName; + + @SerializedName(ApiConstants.TYPE) + @Param(description = "backup type") + private String type; + + @SerializedName(ApiConstants.MOUNT_OPTIONS) + @Param(description = "mount options for the backup repository") + private String mountOptions; + + @SerializedName(ApiConstants.CAPACITY_BYTES) + @Param(description = "capacity of the backup repository") + private Long capacityBytes; + + @SerializedName("created") + @Param(description = "the date and time the backup repository was added") + private Date created; + + public BackupRepositoryResponse() { + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getZoneId() { + return zoneId; + } + + public void setZoneId(String zoneId) { + 
this.zoneId = zoneId; + } + + public String getZoneName() { + return zoneName; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getAddress() { + return address; + } + + public void setAddress(String address) { + this.address = address; + } + + public String getMountOptions() { + return mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public String getProviderName() { + return providerName; + } + + public void setProviderName(String providerName) { + this.providerName = providerName; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Long getCapacityBytes() { + return capacityBytes; + } + + public void setCapacityBytes(Long capacityBytes) { + this.capacityBytes = capacityBytes; + } + + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BgpPeerResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BgpPeerResponse.java new file mode 100644 index 00000000000..344e65c6bad --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/BgpPeerResponse.java @@ -0,0 +1,200 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import java.util.Date; +import java.util.Map; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; +import org.apache.cloudstack.network.BgpPeer; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = BgpPeer.class) +public class BgpPeerResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) + @Param(description = "id of the bgp peer") + private String id; + + @SerializedName(ApiConstants.IP_ADDRESS) + @Param(description = "IPv4 address of bgp peer") + private String ip4Address; + + @SerializedName(ApiConstants.IP6_ADDRESS) + @Param(description = "IPv6 address of bgp peer") + private String ip6Address; + + @SerializedName(ApiConstants.AS_NUMBER) + @Param(description = "AS number of bgp peer") + private Long asNumber; + + @SerializedName(ApiConstants.PASSWORD) + @Param(description = "password of bgp peer") + private String password; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "id of zone to which the bgp peer belongs to." ) + private String zoneId; + + @SerializedName(ApiConstants.ZONE_NAME) + @Param(description = "name of zone to which the bgp peer belongs to." ) + private String zoneName; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "date when this bgp peer was created." 
) + private Date created; + + @SerializedName(ApiConstants.ACCOUNT) + @Param(description = "the account of the bgp peer") + private String accountName; + + @SerializedName(ApiConstants.DOMAIN_ID) + @Param(description = "the domain ID of the bgp peer") + private String domainId; + + @SerializedName(ApiConstants.DOMAIN) + @Param(description = "the domain name of the bgp peer") + private String domainName; + + @SerializedName(ApiConstants.PROJECT_ID) + @Param(description = "the project id of the bgp peer") + private String projectId; + + @SerializedName(ApiConstants.PROJECT) + @Param(description = "the project name of the bgp peer") + private String projectName; + + @SerializedName(ApiConstants.DETAILS) + @Param(description = "additional key/value details of the bgp peer") + private Map details; + + public void setId(String id) { + this.id = id; + } + + public void setIp4Address(String ip4Address) { + this.ip4Address = ip4Address; + } + + public void setIp6Address(String ip6Address) { + this.ip6Address = ip6Address; + } + + public void setAsNumber(Long asNumber) { + this.asNumber = asNumber; + } + + public void setPassword(String password) { + this.password = password; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public void setCreated(Date created) { + this.created = created; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public void setProjectId(String projectId) { + this.projectId = projectId; + } + + public void setProjectName(String projectName) { + this.projectName = projectName; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public void setDomainName(String domainName) { + this.domainName = domainName; + } + + public void setDetails(Map details) { + this.details = details; + } + + public String getId() { + return id; + } + + public String getIp4Address() { + 
return ip4Address; + } + + public String getIp6Address() { + return ip6Address; + } + + public Long getAsNumber() { + return asNumber; + } + + public String getPassword() { + return password; + } + + public String getZoneId() { + return zoneId; + } + + public String getZoneName() { + return zoneName; + } + + public Date getCreated() { + return created; + } + + public String getAccountName() { + return accountName; + } + + public String getDomainId() { + return domainId; + } + + public String getDomainName() { + return domainName; + } + + public String getProjectId() { + return projectId; + } + + public String getProjectName() { + return projectName; + } + + public Map getDetails() { + return details; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BucketResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BucketResponse.java index b75f3604324..f2dd365452c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BucketResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BucketResponse.java @@ -54,6 +54,10 @@ public class BucketResponse extends BaseResponseWithTagInformation implements Co @SerializedName(ApiConstants.DOMAIN) @Param(description = "the domain associated with the bucket") private String domainName; + + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the bucket belongs", since = "4.19.2.0") + private String domainPath; @SerializedName(ApiConstants.OBJECT_STORAGE_ID) @Param(description = "id of the object storage hosting the Bucket; returned to admin user only") private String objectStoragePoolId; @@ -98,7 +102,7 @@ public class BucketResponse extends BaseResponseWithTagInformation implements Co @Param(description = "Bucket Access Key") private String accessKey; - @SerializedName(ApiConstants.SECRET_KEY) + @SerializedName(ApiConstants.USER_SECRET_KEY) @Param(description = "Bucket Secret Key") private String secretKey; @@ -146,6 +150,11 @@ 
public class BucketResponse extends BaseResponseWithTagInformation implements Co this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java index e4224c85e97..3861ac455ed 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java @@ -92,6 +92,10 @@ public class CapabilitiesResponse extends BaseResponse { @Param(description = "true if users can see all accounts within the same domain, false otherwise") private boolean allowUserViewAllDomainAccounts; + @SerializedName(ApiConstants.ALLOW_USER_FORCE_STOP_VM) + @Param(description = "true if users are allowed to force stop a vm, false otherwise", since = "4.20.0") + private boolean allowUserForceStopVM; + @SerializedName("kubernetesserviceenabled") @Param(description = "true if Kubernetes Service plugin is enabled, false otherwise") private boolean kubernetesServiceEnabled; @@ -124,6 +128,14 @@ public class CapabilitiesResponse extends BaseResponse { @Param(description = "the retention time for Instances disks stats", since = "4.18.0") private Integer instancesDisksStatsRetentionTime; + @SerializedName(ApiConstants.SHAREDFSVM_MIN_CPU_COUNT) + @Param(description = "the min CPU count for the service offering used by the shared filesystem instance", since = "4.20.0") + private Integer sharedFsVmMinCpuCount; + + @SerializedName(ApiConstants.SHAREDFSVM_MIN_RAM_SIZE) + @Param(description = "the min Ram size for the service offering used by the shared filesystem instance", since = "4.20.0") + private Integer sharedFsVmMinRamSize; + public void setSecurityGroupsEnabled(boolean 
securityGroupsEnabled) { this.securityGroupsEnabled = securityGroupsEnabled; } @@ -192,6 +204,10 @@ public class CapabilitiesResponse extends BaseResponse { this.allowUserViewAllDomainAccounts = allowUserViewAllDomainAccounts; } + public void setAllowUserForceStopVM(boolean allowUserForceStopVM) { + this.allowUserForceStopVM = allowUserForceStopVM; + } + public void setKubernetesServiceEnabled(boolean kubernetesServiceEnabled) { this.kubernetesServiceEnabled = kubernetesServiceEnabled; } @@ -223,4 +239,12 @@ public class CapabilitiesResponse extends BaseResponse { public void setCustomHypervisorDisplayName(String customHypervisorDisplayName) { this.customHypervisorDisplayName = customHypervisorDisplayName; } + + public void setSharedFsVmMinCpuCount(Integer sharedFsVmMinCpuCount) { + this.sharedFsVmMinCpuCount = sharedFsVmMinCpuCount; + } + + public void setSharedFsVmMinRamSize(Integer sharedFsVmMinRamSize) { + this.sharedFsVmMinRamSize = sharedFsVmMinRamSize; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java index 72dab3da3b1..1c69849239f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ClusterResponse.java @@ -73,7 +73,7 @@ public class ClusterResponse extends BaseResponseWithAnnotations { @SerializedName("capacity") @Param(description = "the capacity of the Cluster", responseObject = CapacityResponse.class) - private List capacitites; + private List capacities; @SerializedName("cpuovercommitratio") @Param(description = "The cpu overcommit ratio of the cluster") @@ -91,6 +91,10 @@ public class ClusterResponse extends BaseResponseWithAnnotations { @Param(description = "Meta data associated with the zone (key/value pairs)") private Map resourceDetails; + @SerializedName(ApiConstants.ARCH) + @Param(description = "CPU Arch of the hosts in the cluster", 
since = "4.20") + private String arch; + public String getId() { return id; } @@ -171,12 +175,12 @@ public class ClusterResponse extends BaseResponseWithAnnotations { this.managedState = managedState; } - public List getCapacitites() { - return capacitites; + public List getCapacities() { + return capacities; } - public void setCapacitites(ArrayList arrayList) { - this.capacitites = arrayList; + public void setCapacities(ArrayList arrayList) { + this.capacities = arrayList; } public void setCpuOvercommitRatio(String cpuovercommitratio) { @@ -219,4 +223,40 @@ public class ClusterResponse extends BaseResponseWithAnnotations { public Map getResourceDetails() { return resourceDetails; } + + public String getCpuovercommitratio() { + return cpuovercommitratio; + } + + public void setCpuovercommitratio(String cpuovercommitratio) { + this.cpuovercommitratio = cpuovercommitratio; + } + + public String getMemoryovercommitratio() { + return memoryovercommitratio; + } + + public void setMemoryovercommitratio(String memoryovercommitratio) { + this.memoryovercommitratio = memoryovercommitratio; + } + + public String getOvm3vip() { + return ovm3vip; + } + + public void setOvm3vip(String ovm3vip) { + this.ovm3vip = ovm3vip; + } + + public void setCapacities(List capacities) { + this.capacities = capacities; + } + + public void setArch(String arch) { + this.arch = arch; + } + + public String getArch() { + return arch; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ConditionResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ConditionResponse.java index 15671430bf1..1038177cb86 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ConditionResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ConditionResponse.java @@ -61,6 +61,10 @@ public class ConditionResponse extends BaseResponse implements ControlledEntityR @Param(description = "the domain name of the owner.") private String domain; + 
@SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the Condition owner belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.ZONE_ID) @Param(description = "zone id of counter") private String zoneId; @@ -138,4 +142,9 @@ public class ConditionResponse extends BaseResponse implements ControlledEntityR public void setDomainName(String domainName) { this.domain = domainName; } + + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ControlledEntityResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ControlledEntityResponse.java index 598ef082253..dc021705d78 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ControlledEntityResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ControlledEntityResponse.java @@ -27,4 +27,6 @@ public interface ControlledEntityResponse { public void setDomainId(String domainId); public void setDomainName(String domainName); + + public void setDomainPath(String domainPath); } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ControlledViewEntityResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ControlledViewEntityResponse.java index abe4dd77143..730c2c38fc5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ControlledViewEntityResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ControlledViewEntityResponse.java @@ -27,4 +27,6 @@ public interface ControlledViewEntityResponse { public void setDomainId(String domainId); public void setDomainName(String domainName); + + public void setDomainPath(String domainPath); } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/DataCenterIpv4SubnetResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/DataCenterIpv4SubnetResponse.java new file mode 100644 index 
00000000000..a1a87794a88 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/DataCenterIpv4SubnetResponse.java @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import java.util.Date; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = DataCenterIpv4GuestSubnet.class) +public class DataCenterIpv4SubnetResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) + @Param(description = "id of the guest IPv4 subnet") + private String id; + + @SerializedName(ApiConstants.SUBNET) + @Param(description = "guest IPv4 subnet") + private String subnet; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "id of zone to which the IPv4 subnet belongs to." ) + private String zoneId; + + @SerializedName(ApiConstants.ZONE_NAME) + @Param(description = "name of zone to which the IPv4 subnet belongs to." 
) + private String zoneName; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "date when this IPv4 subnet was created." ) + private Date created; + + @SerializedName(ApiConstants.ACCOUNT) + @Param(description = "the account of the IPv4 subnet") + private String accountName; + + @SerializedName(ApiConstants.DOMAIN_ID) + @Param(description = "the domain ID of the IPv4 subnet") + private String domainId; + + @SerializedName(ApiConstants.DOMAIN) + @Param(description = "the domain name of the IPv4 subnet") + private String domainName; + + @SerializedName(ApiConstants.PROJECT_ID) + @Param(description = "the project id of the IPv4 subnet") + private String projectId; + + @SerializedName(ApiConstants.PROJECT) + @Param(description = "the project name of the IPv4 subnet") + private String projectName; + + public void setId(String id) { + this.id = id; + } + + public void setSubnet(String subnet) { + this.subnet = subnet; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public void setCreated(Date created) { + this.created = created; + } + + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + public void setProjectId(String projectId) { + this.projectId = projectId; + } + + public void setProjectName(String projectName) { + this.projectName = projectName; + } + + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + public void setDomainName(String domainName) { + this.domainName = domainName; + } + + public String getId() { + return id; + } + + public String getSubnet() { + return subnet; + } + + public String getZoneId() { + return zoneId; + } + + public String getZoneName() { + return zoneName; + } + + public Date getCreated() { + return created; + } + + public String getAccountName() { + return accountName; + } + + public String getDomainId() { + return domainId; + } + + public String 
getDomainName() { + return domainName; + } + + public String getProjectId() { + return projectId; + } + + public String getProjectName() { + return projectName; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/DomainRouterResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/DomainRouterResponse.java index 99e5f6ccdfa..b23d0f4b527 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/DomainRouterResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/DomainRouterResponse.java @@ -185,6 +185,10 @@ public class DomainRouterResponse extends BaseResponseWithAnnotations implements @Param(description = "the domain associated with the router") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the router belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.SERVICE_OFFERING_ID) @Param(description = "the ID of the service offering of the virtual machine") private String serviceOfferingId; @@ -381,6 +385,10 @@ public class DomainRouterResponse extends BaseResponseWithAnnotations implements this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } public void setPublicNetworkId(String publicNetworkId) { this.publicNetworkId = publicNetworkId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/EventResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/EventResponse.java index 8f65492cb70..751d00922f1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/EventResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/EventResponse.java @@ -69,6 +69,10 @@ public class EventResponse extends BaseResponse implements ControlledViewEntityR @Param(description = "the name of the account's domain") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = 
"path of the Domain the account's domain belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.RESOURCE_ID) @Param(description = "the id of the resource", since = "4.17.0") private String resourceId; @@ -132,6 +136,11 @@ public class EventResponse extends BaseResponse implements ControlledViewEntityR this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setResourceId(String resourceId) { this.resourceId = resourceId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/GlobalLoadBalancerResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/GlobalLoadBalancerResponse.java index aac7e29a173..d41e4d2ab34 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/GlobalLoadBalancerResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/GlobalLoadBalancerResponse.java @@ -82,6 +82,10 @@ public class GlobalLoadBalancerResponse extends BaseResponse implements Controll @Param(description = "the domain of the load balancer rule") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the load balancer rule belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.LOAD_BALANCER_RULE) @Param(description = "List of load balancer rules that are part of GSLB rule", responseObject = LoadBalancerResponse.class) private List siteLoadBalancers; @@ -143,6 +147,11 @@ public class GlobalLoadBalancerResponse extends BaseResponse implements Controll this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setSiteLoadBalancers(List siteLoadBalancers) { this.siteLoadBalancers = siteLoadBalancers; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/GuestVlanRangeResponse.java 
b/api/src/main/java/org/apache/cloudstack/api/response/GuestVlanRangeResponse.java index fab2a2cb4ce..133338d27ae 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/GuestVlanRangeResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/GuestVlanRangeResponse.java @@ -44,6 +44,10 @@ public class GuestVlanRangeResponse extends BaseResponse implements ControlledEn @Param(description = "the domain name of the guest VLAN range") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the guest VLAN range belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.GUEST_VLAN_RANGE) @Param(description = "the guest VLAN range") private String guestVlanRange; @@ -83,6 +87,10 @@ public class GuestVlanRangeResponse extends BaseResponse implements ControlledEn this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } public void setGuestVlanRange(String guestVlanRange) { this.guestVlanRange = guestVlanRange; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/GuestVlanResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/GuestVlanResponse.java index 6bcc1d35cb1..6716911ab82 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/GuestVlanResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/GuestVlanResponse.java @@ -49,6 +49,10 @@ public class GuestVlanResponse extends BaseResponse implements ControlledEntityR @Param(description = "the domain name of the guest VLAN range") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the guest VLAN range belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.PROJECT_ID) @Param(description = "the project id of the guest VLAN range") private String projectId; @@ -108,6 +112,10 @@ 
public class GuestVlanResponse extends BaseResponse implements ControlledEntityR this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java index 3a88b819572..62bcc07b16d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/HostResponse.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.outofbandmanagement.OutOfBandManagement; import com.cloud.host.Host; import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; @@ -279,12 +280,20 @@ public class HostResponse extends BaseResponseWithAnnotations { @SerializedName("ueficapability") @Param(description = "true if the host has capability to support UEFI boot") - private Boolean uefiCapabilty; + private Boolean uefiCapability; @SerializedName(ApiConstants.ENCRYPTION_SUPPORTED) @Param(description = "true if the host supports encryption", since = "4.18") private Boolean encryptionSupported; + @SerializedName(ApiConstants.INSTANCE_CONVERSION_SUPPORTED) + @Param(description = "true if the host supports instance conversion (using virt-v2v)", since = "4.19.1") + private Boolean instanceConversionSupported; + + @SerializedName(ApiConstants.ARCH) + @Param(description = "CPU Arch of the host", since = "4.20") + private String arch; + @Override public String getObjectId() { return this.getId(); @@ -550,7 +559,7 @@ public class HostResponse extends BaseResponseWithAnnotations { this.username = username; } - public void setDetails(Map details) { + public void setDetails(Map details, Hypervisor.HypervisorType hypervisorType) { if (details == 
null) { return; @@ -571,6 +580,15 @@ public class HostResponse extends BaseResponseWithAnnotations { this.setEncryptionSupported(new Boolean(false)); // default } + if (Hypervisor.HypervisorType.KVM.equals(hypervisorType)) { + if (detailsCopy.containsKey(Host.HOST_INSTANCE_CONVERSION)) { + this.setInstanceConversionSupported(Boolean.parseBoolean((String) detailsCopy.get(Host.HOST_INSTANCE_CONVERSION))); + detailsCopy.remove(Host.HOST_INSTANCE_CONVERSION); + } else { + this.setInstanceConversionSupported(new Boolean(false)); // default + } + } + this.details = detailsCopy; } @@ -721,7 +739,7 @@ public class HostResponse extends BaseResponseWithAnnotations { return clusterType; } - public Boolean isLocalStorageActive() { + public Boolean getLocalStorageActive() { return localStorageActive; } @@ -741,7 +759,7 @@ public class HostResponse extends BaseResponseWithAnnotations { return hasEnoughCapacity; } - public Boolean isSuitableForMigration() { + public Boolean getSuitableForMigration() { return suitableForMigration; } @@ -753,14 +771,18 @@ public class HostResponse extends BaseResponseWithAnnotations { return haHost; } - public void setUefiCapabilty(Boolean hostCapability) { - this.uefiCapabilty = hostCapability; + public void setUefiCapability(Boolean hostCapability) { + this.uefiCapability = hostCapability; } public void setEncryptionSupported(Boolean encryptionSupported) { this.encryptionSupported = encryptionSupported; } + public void setInstanceConversionSupported(Boolean instanceConversionSupported) { + this.instanceConversionSupported = instanceConversionSupported; + } + public Boolean getIsTagARule() { return isTagARule; } @@ -768,4 +790,84 @@ public class HostResponse extends BaseResponseWithAnnotations { public void setIsTagARule(Boolean tagARule) { isTagARule = tagARule; } + + public void setArch(String arch) { + this.arch = arch; + } + + public String getArch() { + return arch; + } + + public Long getCpuAllocatedValue() { + return cpuAllocatedValue; + } 
+ + public String getCpuAllocatedPercentage() { + return cpuAllocatedPercentage; + } + + public String getCpuAllocatedWithOverprovisioning() { + return cpuAllocatedWithOverprovisioning; + } + + public Double getCpuloadaverage() { + return cpuloadaverage; + } + + public void setCpuloadaverage(Double cpuloadaverage) { + this.cpuloadaverage = cpuloadaverage; + } + + public String getMemWithOverprovisioning() { + return memWithOverprovisioning; + } + + public String getMemoryAllocatedPercentage() { + return memoryAllocatedPercentage; + } + + public Long getMemoryAllocatedBytes() { + return memoryAllocatedBytes; + } + + public Boolean getTagARule() { + return isTagARule; + } + + public void setTagARule(Boolean tagARule) { + isTagARule = tagARule; + } + + public Boolean getHasEnoughCapacity() { + return hasEnoughCapacity; + } + + public void setDetails(Map details) { + this.details = details; + } + + public String getAnnotation() { + return annotation; + } + + public Date getLastAnnotated() { + return lastAnnotated; + } + + public String getUsername() { + return username; + } + + public Boolean getUefiCapability() { + return uefiCapability; + } + + public Boolean getEncryptionSupported() { + return encryptionSupported; + } + + public Boolean getInstanceConversionSupported() { + return instanceConversionSupported; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/IPAddressResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/IPAddressResponse.java index 8a9bf7789dd..0018edc8638 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/IPAddressResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/IPAddressResponse.java @@ -75,6 +75,10 @@ public class IPAddressResponse extends BaseResponseWithAnnotations implements Co @Param(description = "the domain the public IP address is associated with") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to 
which the public IP address belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.FOR_VIRTUAL_NETWORK) @Param(description = "the virtual network for the IP address") private Boolean forVirtualNetwork; @@ -211,6 +215,10 @@ public class IPAddressResponse extends BaseResponseWithAnnotations implements Co this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } public void setForVirtualNetwork(Boolean forVirtualNetwork) { this.forVirtualNetwork = forVirtualNetwork; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java index 532963dbddc..ee44b6bc474 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java @@ -27,11 +27,11 @@ import com.google.gson.annotations.SerializedName; @EntityReference(value = ImageStore.class) public class ImageStoreResponse extends BaseResponseWithAnnotations { - @SerializedName("id") + @SerializedName(ApiConstants.ID) @Param(description = "the ID of the image store") private String id; - @SerializedName("zoneid") + @SerializedName(ApiConstants.ZONE_ID) @Param(description = "the Zone ID of the image store") private String zoneId; @@ -39,15 +39,15 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the Zone name of the image store") private String zoneName; - @SerializedName("name") + @SerializedName(ApiConstants.NAME) @Param(description = "the name of the image store") private String name; - @SerializedName("url") + @SerializedName(ApiConstants.URL) @Param(description = "the url of the image store") private String url; - @SerializedName("protocol") + @SerializedName(ApiConstants.PROTOCOL) @Param(description = "the protocol of the image store") private String protocol; 
@@ -55,11 +55,11 @@ public class ImageStoreResponse extends BaseResponseWithAnnotations { @Param(description = "the provider name of the image store") private String providerName; - @SerializedName("scope") + @SerializedName(ApiConstants.SCOPE) @Param(description = "the scope of the image store") private ScopeType scope; - @SerializedName("readonly") + @SerializedName(ApiConstants.READ_ONLY) @Param(description = "defines if store is read-only") private Boolean readonly; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/InstanceGroupResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/InstanceGroupResponse.java index e1241cc19bc..9c7a4fc09a1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/InstanceGroupResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/InstanceGroupResponse.java @@ -63,6 +63,10 @@ public class InstanceGroupResponse extends BaseResponseWithAnnotations implement @Param(description = "the domain name of the instance group") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the instance group belongs to", since = "4.19.2.0") + private String domainPath; + public void setId(String id) { this.id = id; } @@ -90,6 +94,11 @@ public class InstanceGroupResponse extends BaseResponseWithAnnotations implement this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/Ipv4RouteResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/Ipv4RouteResponse.java new file mode 100644 index 00000000000..136c87971b7 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/Ipv4RouteResponse.java @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor 
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class Ipv4RouteResponse extends BaseResponse { + + @SerializedName(ApiConstants.SUBNET) + @Param(description = "the guest Ipv4 cidr for route") + private String subnet; + + @SerializedName(ApiConstants.GATEWAY) + @Param(description = "the outbound Ipv4 gateway") + private String gateway; + + public Ipv4RouteResponse() { + } + + public Ipv4RouteResponse(String subnet, String gateway) { + this.subnet = subnet; + this.gateway = gateway; + } + + public String getSubnet() { + return subnet; + } + + public void setSubnet(String subnet) { + this.subnet = subnet; + } + + public String getGateway() { + return gateway; + } + + public void setGateway(String gateway) { + this.gateway = gateway; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/Ipv4SubnetForGuestNetworkResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/Ipv4SubnetForGuestNetworkResponse.java new file mode 100644 index 00000000000..1430bcd059c --- /dev/null +++ 
b/api/src/main/java/org/apache/cloudstack/api/response/Ipv4SubnetForGuestNetworkResponse.java @@ -0,0 +1,199 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import java.util.Date; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = Ipv4GuestSubnetNetworkMap.class) +public class Ipv4SubnetForGuestNetworkResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) + @Param(description = "id of the IPv4 subnet for guest network") + private String id; + + @SerializedName(ApiConstants.PARENT_ID) + @Param(description = "id of the data center IPv4 subnet") + private String parentId; + + @SerializedName(ApiConstants.PARENT_SUBNET) + @Param(description = "subnet of the data center IPv4 subnet") + private String parentSubnet; + + @SerializedName(ApiConstants.SUBNET) + @Param(description = "subnet of the IPv4 network") + private String subnet; + + @SerializedName(ApiConstants.STATE) + 
@Param(description = "state of subnet of the IPv4 network") + private String state; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "id of zone to which the IPv4 subnet belongs to." ) + private String zoneId; + + @SerializedName(ApiConstants.ZONE_NAME) + @Param(description = "name of zone to which the IPv4 subnet belongs to." ) + private String zoneName; + + @SerializedName(ApiConstants.NETWORK_ID) + @Param(description = "id of network which the IPv4 subnet is associated with." ) + private String networkId; + + @SerializedName(ApiConstants.NETWORK_NAME) + @Param(description = "name of network which the IPv4 subnet is associated with." ) + private String networkName; + + @SerializedName(ApiConstants.VPC_ID) + @Param(description = "Id of the VPC which the IPv4 subnet is associated with.") + private String vpcId; + + @SerializedName(ApiConstants.VPC_NAME) + @Param(description = "Name of the VPC which the IPv4 subnet is associated with.") + private String vpcName; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "date when this IPv4 subnet was created." ) + private Date created; + + @SerializedName(ApiConstants.REMOVED) + @Param(description = "date when this IPv4 subnet was removed." ) + private Date removed; + + @SerializedName(ApiConstants.ALLOCATED_TIME) + @Param(description = "date when this IPv4 subnet was allocated." 
) + private Date allocatedTime; + + public void setId(String id) { + this.id = id; + } + + public void setParentId(String parentId) { + this.parentId = parentId; + } + + public void setParentSubnet(String parentSubnet) { + this.parentSubnet = parentSubnet; + } + + public void setSubnet(String subnet) { + this.subnet = subnet; + } + + public void setState(String state) { + this.state = state; + } + + public void setNetworkId(String networkId) { + this.networkId = networkId; + } + + public void setNetworkName(String networkName) { + this.networkName = networkName; + } + + public void setVpcId(String vpcId) { + this.vpcId = vpcId; + } + + public void setVpcName(String vpcName) { + this.vpcName = vpcName; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public void setCreated(Date created) { + this.created = created; + } + + public void setRemoved(Date removed) { + this.removed = removed; + } + + public void setAllocatedTime(Date allocatedTime) { + this.allocatedTime = allocatedTime; + } + + public String getId() { + return id; + } + + public String getParentId() { + return parentId; + } + + public String getParentSubnet() { + return parentSubnet; + } + + public String getSubnet() { + return subnet; + } + + public String getState() { + return state; + } + + public String getZoneId() { + return zoneId; + } + + public String getZoneName() { + return zoneName; + } + + public String getNetworkId() { + return networkId; + } + + public String getNetworkName() { + return networkName; + } + + public String getVpcId() { + return vpcId; + } + + public String getVpcName() { + return vpcName; + } + + public Date getCreated() { + return created; + } + + public Date getRemoved() { + return removed; + } + + public Date getAllocatedTime() { + return allocatedTime; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/LoadBalancerResponse.java 
b/api/src/main/java/org/apache/cloudstack/api/response/LoadBalancerResponse.java index 32748269587..e520dec09e7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/LoadBalancerResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/LoadBalancerResponse.java @@ -87,6 +87,10 @@ public class LoadBalancerResponse extends BaseResponse implements ControlledEnti @Param(description = "the domain of the load balancer rule") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the load balancer rule belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.STATE) @Param(description = "the state of the rule") private String state; @@ -158,6 +162,11 @@ public class LoadBalancerResponse extends BaseResponse implements ControlledEnti this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setState(String state) { this.state = state; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java index 330f91e69f3..a471045eb67 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ManagementServerResponse.java @@ -167,4 +167,8 @@ public class ManagementServerResponse extends BaseResponse { public void setServiceIp(String serviceIp) { this.serviceIp = serviceIp; } + + public String getKernelVersion() { + return kernelVersion; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/NetworkOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/NetworkOfferingResponse.java index b73163a5d05..81a8129ecb7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/NetworkOfferingResponse.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/response/NetworkOfferingResponse.java @@ -107,9 +107,9 @@ public class NetworkOfferingResponse extends BaseResponseWithAnnotations { @Param(description = "true if network offering can be used by Tungsten-Fabric networks only") private Boolean forTungsten; - @SerializedName(ApiConstants.NSX_MODE) - @Param(description = "Mode in which the network will operate. This parameter is only relevant for NSX offerings") - private String nsxMode; + @SerializedName(ApiConstants.NETWORK_MODE) + @Param(description = "Mode in which the network will operate. The valid values are NATTED and ROUTED") + private String networkMode; @SerializedName(ApiConstants.IS_PERSISTENT) @Param(description = "true if network offering supports persistent networks, false otherwise") @@ -159,6 +159,14 @@ public class NetworkOfferingResponse extends BaseResponseWithAnnotations { @Param(description = "the internet protocol of the network offering") private String internetProtocol; + @SerializedName(ApiConstants.SPECIFY_AS_NUMBER) + @Param(description = "true if network offering supports choosing AS numbers") + private Boolean specifyAsNumber; + + @SerializedName(ApiConstants.ROUTING_MODE) + @Param(description = "the routing mode for the network offering, supported types are Static or Dynamic.") + private String routingMode; + public void setId(String id) { this.id = id; } @@ -235,8 +243,8 @@ public class NetworkOfferingResponse extends BaseResponseWithAnnotations { this.forTungsten = forTungsten; } - public void setNsxMode(String nsxMode) { - this.nsxMode = nsxMode; + public void setNetworkMode(String networkMode) { + this.networkMode = networkMode; } public void setIsPersistent(Boolean isPersistent) { @@ -306,4 +314,20 @@ public class NetworkOfferingResponse extends BaseResponseWithAnnotations { public void setInternetProtocol(String internetProtocol) { this.internetProtocol = internetProtocol; } + + public Boolean getSpecifyAsNumber() { + return 
specifyAsNumber; + } + + public void setSpecifyAsNumber(Boolean specifyAsNumber) { + this.specifyAsNumber = specifyAsNumber; + } + + public String getRoutingMode() { + return routingMode; + } + + public void setRoutingMode(String routingMode) { + this.routingMode = routingMode; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java index d34f949372a..a80317c83cd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/NetworkResponse.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.api.response; import java.util.Date; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -135,6 +136,14 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement @Param(description = "The vlan of the network. This parameter is visible to ROOT admins only") private String vlan; + @SerializedName(ApiConstants.AS_NUMBER_ID) + @Param(description = "UUID of AS NUMBER", since = "4.20.0") + private String asNumberId; + + @SerializedName(ApiConstants.AS_NUMBER) + @Param(description = "AS NUMBER", since = "4.20.0") + private Long asNumber; + @SerializedName(ApiConstants.ACL_TYPE) @Param(description = "acl type - access type to the network") private String aclType; @@ -292,7 +301,7 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement private String internetProtocol; @SerializedName(ApiConstants.IPV6_ROUTING) - @Param(description = "The routing mode of network offering", since = "4.17.0") + @Param(description = "The Ipv6 routing type of network offering", since = "4.17.0") private String ipv6Routing; @SerializedName(ApiConstants.IPV6_ROUTES) @@ -315,6 +324,18 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement @Param(description = "the second IPv6 DNS for 
the network", since = "4.18.0") private String ipv6Dns2; + @SerializedName(ApiConstants.IPV4_ROUTING) + @Param(description = "The IPv4 routing type of network", since = "4.20.0") + private String ipv4Routing; + + @SerializedName(ApiConstants.IPV4_ROUTES) + @Param(description = "The routes for the network to ease adding route in upstream router", since = "4.20.0") + private Set ipv4Routes; + + @SerializedName(ApiConstants.BGP_PEERS) + @Param(description = "The BGP peers for the network", since = "4.20.0") + private Set bgpPeers; + public NetworkResponse() {} public Boolean getDisplayNetwork() { @@ -415,6 +436,14 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement this.vlan = vlan; } + public void setAsNumber(long asNumber) { + this.asNumber = asNumber; + } + + public void setAsNumberId(String asNumberId) { + this.asNumberId = asNumberId; + } + public void setIsSystem(Boolean isSystem) { this.isSystem = isSystem; } @@ -424,6 +453,7 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement this.domain = domain; } + @Override public void setDomainPath(String domainPath) { this.domainPath = domainPath; } @@ -623,6 +653,18 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement this.internetProtocol = internetProtocol; } + public void setIpv4Routing(String ipv4Routing) { + this.ipv4Routing = ipv4Routing; + } + + public void setIpv4Routes(Set ipv4Routes) { + this.ipv4Routes = ipv4Routes; + } + + public void addIpv4Route(Ipv4RouteResponse ipv4Route) { + this.ipv4Routes.add(ipv4Route); + } + public void setIpv6Routing(String ipv6Routing) { this.ipv6Routing = ipv6Routing; } @@ -635,6 +677,17 @@ public class NetworkResponse extends BaseResponseWithAssociatedNetwork implement this.ipv6Routes.add(ipv6Route); } + public void setBgpPeers(Set bgpPeers) { + this.bgpPeers = bgpPeers; + } + + public void addBgpPeer(BgpPeerResponse bgpPeer) { + if (this.bgpPeers == null) { + this.setBgpPeers(new 
LinkedHashSet<>()); + } + this.bgpPeers.add(bgpPeer); + } + public Integer getPublicMtu() { return publicMtu; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/OvsProviderResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/OvsProviderResponse.java index ac5b9e309c8..2b67e1618dc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/OvsProviderResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/OvsProviderResponse.java @@ -54,6 +54,10 @@ public class OvsProviderResponse extends BaseResponse implements @Param(description = "the domain associated with the provider") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the provider belongs", since = "4.19.2.0") + private String domainPath; + @Override public void setAccountName(String accountName) { this.accountName = accountName; @@ -73,6 +77,10 @@ public class OvsProviderResponse extends BaseResponse implements this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/PrivateGatewayResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/PrivateGatewayResponse.java index 65401eb2a02..414aed94bad 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/PrivateGatewayResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/PrivateGatewayResponse.java @@ -89,6 +89,10 @@ public class PrivateGatewayResponse extends BaseResponseWithAssociatedNetwork im @Param(description = "the domain associated with the private gateway") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the private gateway belongs", since = "4.19.2.0") + private String domainPath; + 
@SerializedName(ApiConstants.STATE) @Param(description = "State of the gateway, can be Creating, Ready, Deleting") private String state; @@ -165,6 +169,10 @@ public class PrivateGatewayResponse extends BaseResponseWithAssociatedNetwork im this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ProjectAccountResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ProjectAccountResponse.java index aab03074155..d035622f65f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ProjectAccountResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ProjectAccountResponse.java @@ -73,6 +73,10 @@ public class ProjectAccountResponse extends BaseResponse implements ControlledVi @Param(description = "name of the Domain the account belongs too") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the account belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.USER) @Param(description = "the list of users associated with account", responseObject = UserResponse.class) private List users; @@ -110,6 +114,11 @@ public class ProjectAccountResponse extends BaseResponse implements ControlledVi this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setUserId(String userId) { this.userId = userId; } public void setProjectRoleId(String projectRoleId) { diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ProjectInvitationResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ProjectInvitationResponse.java index 4462ea91568..ad3f99f6b3f 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/response/ProjectInvitationResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ProjectInvitationResponse.java @@ -51,6 +51,10 @@ public class ProjectInvitationResponse extends BaseResponse implements Controlle @Param(description = "the domain name where the project belongs to") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the project belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.ACCOUNT) @Param(description = "the account name of the project's owner") private String accountName; @@ -87,6 +91,11 @@ public class ProjectInvitationResponse extends BaseResponse implements Controlle this.domainName = domain; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public void setAccountName(String accountName) { this.accountName = accountName; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/PurgeExpungedResourcesResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/PurgeExpungedResourcesResponse.java new file mode 100644 index 00000000000..3807d0d5b16 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/PurgeExpungedResourcesResponse.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class PurgeExpungedResourcesResponse extends BaseResponse { + + @SerializedName(ApiConstants.RESOURCE_COUNT) + @Param(description = "The count of the purged expunged resources") + private Long resourceCount; + + public Long getResourceCount() { + return resourceCount; + } + + public void setResourceCount(Long resourceCount) { + this.resourceCount = resourceCount; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java index 0e078bea5bd..54164e3ac34 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/RemoteAccessVpnResponse.java @@ -65,6 +65,10 @@ public class RemoteAccessVpnResponse extends BaseResponse implements ControlledE @Param(description = "the domain name of the account of the remote access vpn") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the remote access vpn belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.STATE) @Param(description = "the state of the rule") private String state; @@ -104,6 +108,10 @@ public class RemoteAccessVpnResponse extends 
BaseResponse implements ControlledE this.domainName = name; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } public void setState(String state) { this.state = state; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ResourceCountResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ResourceCountResponse.java index 3a698618585..74511a0f743 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ResourceCountResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ResourceCountResponse.java @@ -45,6 +45,10 @@ public class ResourceCountResponse extends BaseResponse implements ControlledEnt @Param(description = "the domain name for which resource count's are updated") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the resource counts are updated", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.RESOURCE_TYPE) @Param(description = "resource type. Values include 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11. 
See the resourceType parameter for more information on these values.") private String resourceType; @@ -76,6 +80,11 @@ public class ResourceCountResponse extends BaseResponse implements ControlledEnt this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setResourceType(Resource.ResourceType resourceType) { this.resourceType = Integer.valueOf(resourceType.getOrdinal()).toString(); this.resourceTypeName = resourceType.getName(); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ResourceLimitResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ResourceLimitResponse.java index 72c1c66f5e8..b5b03873f3f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ResourceLimitResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ResourceLimitResponse.java @@ -40,6 +40,10 @@ public class ResourceLimitResponse extends BaseResponse implements ControlledEnt @Param(description = "the domain name of the resource limit") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the resource limit belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.RESOURCE_TYPE) @Param(description = "resource type. Values include 0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11. 
See the resourceType parameter for more information on these values.") private String resourceType; @@ -84,6 +88,10 @@ public class ResourceLimitResponse extends BaseResponse implements ControlledEnt this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } public void setResourceType(Resource.ResourceType resourceType) { this.resourceType = Integer.valueOf(resourceType.getOrdinal()).toString(); this.resourceTypeName = resourceType.getName(); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ResourceTagResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ResourceTagResponse.java index 44325560534..26e4d19cdba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ResourceTagResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ResourceTagResponse.java @@ -61,6 +61,10 @@ public class ResourceTagResponse extends BaseResponse implements ControlledViewE @Param(description = "the domain associated with the tag") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain associated with the tag", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.CUSTOMER) @Param(description = "customer associated with the tag") private String customer; @@ -96,6 +100,11 @@ public class ResourceTagResponse extends BaseResponse implements ControlledViewE this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/RoleResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/RoleResponse.java index 1861028f0ed..92e3b46139f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/RoleResponse.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/response/RoleResponse.java @@ -36,6 +36,10 @@ public class RoleResponse extends BaseRoleResponse { @Param(description = "true if role is default, false otherwise") private Boolean isDefault; + @SerializedName(ApiConstants.STATE) + @Param(description = "the state of the role") + private String state; + public void setRoleType(RoleType roleType) { if (roleType != null) { this.roleType = roleType.name(); @@ -45,4 +49,8 @@ public class RoleResponse extends BaseRoleResponse { public void setIsDefault(Boolean isDefault) { this.isDefault = isDefault; } + + public void setState(String state) { + this.state = state; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SecurityGroupResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SecurityGroupResponse.java index c96421b0a40..de486b5a374 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/SecurityGroupResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/SecurityGroupResponse.java @@ -63,6 +63,10 @@ public class SecurityGroupResponse extends BaseResponse implements ControlledVie @Param(description = "the domain name of the security group") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the security group belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName("ingressrule") @Param(description = "the list of ingress rules associated with the security group", responseObject = SecurityGroupRuleResponse.class) private Set ingressRules; @@ -126,6 +130,11 @@ public class SecurityGroupResponse extends BaseResponse implements ControlledVie this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setSecurityGroupIngressRules(Set securityGroupRules) { this.ingressRules = securityGroupRules; } diff --git 
a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index c7740c19214..0622b936f6e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -234,6 +234,10 @@ public class ServiceOfferingResponse extends BaseResponseWithAnnotations { @Param(description = "true if virtual machine root disk will be encrypted on storage", since = "4.18") private Boolean encryptRoot; + @SerializedName(ApiConstants.PURGE_RESOURCES) + @Param(description = "Whether to cleanup VM and its associated resource upon expunge", since = "4.20") + private Boolean purgeResources; + public ServiceOfferingResponse() { } @@ -555,4 +559,8 @@ public class ServiceOfferingResponse extends BaseResponseWithAnnotations { } public void setEncryptRoot(Boolean encrypt) { this.encryptRoot = encrypt; } + + public void setPurgeResources(Boolean purgeResources) { + this.purgeResources = purgeResources; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SharedFSProviderResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SharedFSProviderResponse.java new file mode 100644 index 00000000000..4d92945646f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/SharedFSProviderResponse.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class SharedFSProviderResponse extends BaseResponse { + @SerializedName(ApiConstants.NAME) + @Param(description = "the name of the shared filesystem provider") + private String name; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SharedFSResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SharedFSResponse.java new file mode 100644 index 00000000000..bac348fe36e --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/SharedFSResponse.java @@ -0,0 +1,369 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponseWithTagInformation; +import org.apache.cloudstack.api.EntityReference; +import org.apache.cloudstack.storage.sharedfs.SharedFS; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +import java.util.ArrayList; +import java.util.List; + + +@EntityReference(value = SharedFS.class) +public class SharedFSResponse extends BaseResponseWithTagInformation implements ControlledViewEntityResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "ID of the shared filesystem") + private String id; + + @SerializedName(ApiConstants.NAME) + @Param(description = "name of the shared filesystem") + private String name; + + @SerializedName(ApiConstants.DESCRIPTION) + @Param(description = "description of the shared filesystem") + private String description; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "ID of the availability zone") + private String zoneId; + + @SerializedName(ApiConstants.ZONE_NAME) + @Param(description = "Name of the availability zone") + private String zoneName; + + @SerializedName(ApiConstants.VIRTUAL_MACHINE_ID) + @Param(description = "ID of the storage fs vm") + private String virtualMachineId; + + @SerializedName(ApiConstants.VIRTUAL_MACHINE_STATE) + @Param(description = "state of the storage fs vm") + private String virtualMachineState; + + @SerializedName(ApiConstants.VOLUME_NAME) + @Param(description = "name of the storage fs data 
volume") + private String volumeName; + + @SerializedName(ApiConstants.VOLUME_ID) + @Param(description = "ID of the storage fs data volume") + private String volumeId; + + @SerializedName(ApiConstants.STORAGE) + @Param(description = "name of the storage pool hosting the data volume") + private String storagePoolName; + + @SerializedName(ApiConstants.STORAGE_ID) + @Param(description = "ID of the storage pool hosting the data volume") + private String storagePoolId; + + @SerializedName(ApiConstants.SIZE) + @Param(description = "size of the shared filesystem") + private Long size; + + @SerializedName(ApiConstants.SIZEGB) + @Param(description = "size of the shared filesystem in GiB") + private String sizeGB; + + @SerializedName(ApiConstants.DISK_OFFERING_ID) + @Param(description = "disk offering ID for the shared filesystem") + private String diskOfferingId; + + @SerializedName("diskofferingname") + @Param(description = "disk offering for the shared filesystem") + private String diskOfferingName; + + @SerializedName("iscustomdiskoffering") + @Param(description = "disk offering for the shared filesystem has custom size") + private Boolean isCustomDiskOffering; + + @SerializedName("diskofferingdisplaytext") + @Param(description = "disk offering display text for the shared filesystem") + private String diskOfferingDisplayText; + + @SerializedName(ApiConstants.SERVICE_OFFERING_ID) + @Param(description = "service offering ID for the shared filesystem") + private String serviceOfferingId; + + @SerializedName("serviceofferingname") + @Param(description = "service offering for the shared filesystem") + private String serviceOfferingName; + + @SerializedName(ApiConstants.NETWORK_ID) + @Param(description = "Network ID of the shared filesystem") + private String networkId; + + @SerializedName(ApiConstants.NETWORK_NAME) + @Param(description = "Network name of the shared filesystem") + private String networkName; + + @SerializedName(ApiConstants.NIC) + @Param(description = "the 
list of nics associated with the shared filesystem", responseObject = NicResponse.class) + private List nics; + + @SerializedName(ApiConstants.PATH) + @Param(description = "path to mount the shared filesystem") + private String path; + + @SerializedName(ApiConstants.STATE) + @Param(description = "the state of the shared filesystem") + private String state; + + @SerializedName(ApiConstants.PROVIDER) + @Param(description = "the shared filesystem provider") + private String provider; + + @SerializedName(ApiConstants.FILESYSTEM) + @Param(description = "the filesystem format") + private String filesystem; + + @SerializedName(ApiConstants.ACCOUNT) + @Param(description = "the account associated with the shared filesystem") + private String accountName; + + @SerializedName(ApiConstants.PROJECT_ID) + @Param(description = "the project ID of the shared filesystem") + private String projectId; + + @SerializedName(ApiConstants.PROJECT) + @Param(description = "the project name of the shared filesystem") + private String projectName; + + @SerializedName(ApiConstants.DOMAIN_ID) + @Param(description = "the ID of the domain associated with the shared filesystem") + private String domainId; + + @SerializedName(ApiConstants.DOMAIN) + @Param(description = "the domain associated with the shared filesystem") + private String domainName; + + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the shared filesystem belongs") + private String domainPath; + + @SerializedName(ApiConstants.PROVISIONINGTYPE) + @Param(description = "provisioning type used in the shared filesystem") + private String provisioningType; + + @SerializedName(ApiConstants.DISK_IO_READ) + @Param(description = "the read (IO) of disk on the shared filesystem") + private Long diskIORead; + + @SerializedName(ApiConstants.DISK_IO_WRITE) + @Param(description = "the write (IO) of disk on the shared filesystem") + private Long diskIOWrite; + + @SerializedName(ApiConstants.DISK_KBS_READ) + 
@Param(description = "the shared filesystem's disk read in KiB") + private Long diskKbsRead; + + @SerializedName(ApiConstants.DISK_KBS_WRITE) + @Param(description = "the shared filesystem's disk write in KiB") + private Long diskKbsWrite; + + @SerializedName(ApiConstants.VIRTUAL_SIZE) + @Param(description = "the bytes allocated") + private Long virtualSize; + + @SerializedName(ApiConstants.PHYSICAL_SIZE) + @Param(description = "the bytes actually consumed on disk") + private Long physicalSize; + + @SerializedName(ApiConstants.UTILIZATION) + @Param(description = "the disk utilization") + private String utilization; + + @Override + public void setAccountName(String accountName) { + this.accountName = accountName; + } + + @Override + public void setProjectId(String projectId) { + this.projectId = projectId; + } + + @Override + public void setProjectName(String projectName) { + this.projectName = projectName; + } + + @Override + public void setDomainId(String domainId) { + this.domainId = domainId; + } + + @Override + public void setDomainName(String domainName) { + this.domainName = domainName; + } + + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + + public void setId(String id) { + this.id = id; + } + + public void setName(String name) { + this.name = name; + } + + public void setZoneId(String zoneId) { + this.zoneId = zoneId; + } + + public void setZoneName(String zoneName) { + this.zoneName = zoneName; + } + + public void setVirtualMachineId(String virtualMachineId) { + this.virtualMachineId = virtualMachineId; + } + + public void setState(String state) { + this.state = state; + } + + public void setVolumeId(String volumeId) { + this.volumeId = volumeId; + } + + public void setNetworkId(String networkId) { + this.networkId = networkId; + } + + public void setNetworkName(String networkName) { + this.networkName = networkName; + } + + public List getNics() { + return nics; + } + + public void addNic(NicResponse nic) { 
+ if (this.nics == null) { + this.nics = new ArrayList<>(); + } + this.nics.add(nic); + } + + public void setSize(Long size) { + this.size = size; + } + + public void setDescription(String description) { + this.description = description; + } + + public void setPath(String path) { + this.path = path; + } + + public void setVolumeName(String volumeName) { + this.volumeName = volumeName; + } + + public void setStoragePoolName(String storagePoolName) { + this.storagePoolName = storagePoolName; + } + + public void setStoragePoolId(String storagePoolId) { + this.storagePoolId = storagePoolId; + } + + public void setProvider(String provider) { + this.provider = provider; + } + + public void setFilesystem(String filesystem) { + this.filesystem = filesystem; + } + + public void setSizeGB(Long size) { + if (size != null) { + this.sizeGB = String.format("%.2f GiB", size / (1024.0 * 1024.0 * 1024.0)); + } + } + + public void setDiskOfferingId(String diskOfferingId) { + this.diskOfferingId = diskOfferingId; + } + + public void setDiskOfferingName(String diskOfferingName) { + this.diskOfferingName = diskOfferingName; + } + + public void setDiskOfferingDisplayText(String diskOfferingDisplayText) { + this.diskOfferingDisplayText = diskOfferingDisplayText; + } + + public void setServiceOfferingId(String serviceOfferingId) { + this.serviceOfferingId = serviceOfferingId; + } + + public void setServiceOfferingName(String serviceOfferingName) { + this.serviceOfferingName = serviceOfferingName; + } + + public void setProvisioningType(String provisioningType) { + this.provisioningType = provisioningType; + } + + public void setDiskIORead(Long diskIORead) { + this.diskIORead = diskIORead; + } + + public void setDiskIOWrite(Long diskIOWrite) { + this.diskIOWrite = diskIOWrite; + } + + public void setDiskKbsRead(Long diskKbsRead) { + this.diskKbsRead = diskKbsRead; + } + + public void setDiskKbsWrite(Long diskKbsWrite) { + this.diskKbsWrite = diskKbsWrite; + } + + public void 
setVirtualSize(Long virtualSize) { + this.virtualSize = virtualSize; + } + + public void setPhysicalSize(Long physicalSize) { + this.physicalSize = physicalSize; + } + + public void setUtilization(String utilization) { + this.utilization = utilization; + } + + public void setIsCustomDiskOffering(Boolean isCustomDiskOffering) { + this.isCustomDiskOffering = isCustomDiskOffering; + } + + public void setVirtualMachineState(String virtualMachineState) { + this.virtualMachineState = virtualMachineState; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteCustomerGatewayResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteCustomerGatewayResponse.java index babc9bf4432..4ae140ec573 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteCustomerGatewayResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteCustomerGatewayResponse.java @@ -98,6 +98,10 @@ public class Site2SiteCustomerGatewayResponse extends BaseResponseWithAnnotation @Param(description = "the domain name of the owner") private String domain; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "the domain path of the owner", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.REMOVED) @Param(description = "the date and time the host was removed") private Date removed; @@ -193,4 +197,9 @@ public class Site2SiteCustomerGatewayResponse extends BaseResponseWithAnnotation this.domain = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteVpnConnectionResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteVpnConnectionResponse.java index 1f7509239d1..a9fd0f9703c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteVpnConnectionResponse.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteVpnConnectionResponse.java @@ -120,6 +120,10 @@ public class Site2SiteVpnConnectionResponse extends BaseResponse implements Cont @Param(description = "the domain name of the owner") private String domain; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "the domain path of the owner", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.CREATED) @Param(description = "the date and time the host was created") private Date created; @@ -241,6 +245,11 @@ public class Site2SiteVpnConnectionResponse extends BaseResponse implements Cont this.domain = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setForDisplay(Boolean forDisplay) { this.forDisplay = forDisplay; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteVpnGatewayResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteVpnGatewayResponse.java index cdd8e4f3d87..1e63ba896c7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteVpnGatewayResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteVpnGatewayResponse.java @@ -66,6 +66,10 @@ public class Site2SiteVpnGatewayResponse extends BaseResponse implements Control @Param(description = "the domain name of the owner") private String domain; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "the domain path of the owner", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.REMOVED) @Param(description = "the date and time the host was removed") private Date removed; @@ -119,6 +123,10 @@ public class Site2SiteVpnGatewayResponse extends BaseResponse implements Control this.domain = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } public void setForDisplay(Boolean forDisplay) { this.forDisplay 
= forDisplay; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java index e160f64ebe9..9f7a7f42dec 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java @@ -47,6 +47,10 @@ public class SnapshotResponse extends BaseResponseWithTagInformation implements @Param(description = "the domain name of the snapshot's account") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the snapshot's account belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.PROJECT_ID) @Param(description = "the project id of the snapshot") private String projectId; @@ -71,6 +75,10 @@ public class SnapshotResponse extends BaseResponseWithTagInformation implements @Param(description = "type of the disk volume") private String volumeType; + @SerializedName(ApiConstants.VOLUME_STATE) + @Param(description = "state of the disk volume") + private String volumeState; + @SerializedName(ApiConstants.CREATED) @Param(description = " the date the snapshot was created") private Date created; @@ -183,6 +191,11 @@ public class SnapshotResponse extends BaseResponseWithTagInformation implements this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setSnapshotType(String snapshotType) { this.snapshotType = snapshotType; } @@ -199,6 +212,10 @@ public class SnapshotResponse extends BaseResponseWithTagInformation implements this.volumeType = volumeType; } + public void setVolumeState(String volumeState) { + this.volumeState = volumeState; + } + public void setCreated(Date created) { this.created = created; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StaticRouteResponse.java 
b/api/src/main/java/org/apache/cloudstack/api/response/StaticRouteResponse.java index 79c7f9040a1..51f8a130383 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/StaticRouteResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/StaticRouteResponse.java @@ -70,6 +70,10 @@ public class StaticRouteResponse extends BaseResponse implements ControlledEntit @Param(description = "the domain associated with the static route") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "the domain path associated with the static route", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.TAGS) @Param(description = "the list of resource tags associated with static route", responseObject = ResourceTagResponse.class) private List tags; @@ -114,6 +118,10 @@ public class StaticRouteResponse extends BaseResponse implements ControlledEntit this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java index 183290ec9eb..06d5103d731 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java @@ -97,10 +97,18 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { @Param(description = "total min IOPS currently in use by volumes") private Long allocatedIops; + @SerializedName(ApiConstants.STORAGE_CUSTOM_STATS) + @Param(description = "the storage pool custom stats", since = "4.18.1") + private Map customStats; + @SerializedName("tags") @Param(description = "the tags for the storage pool") private String tags; + 
@SerializedName(ApiConstants.NFS_MOUNT_OPTIONS) + @Param(description = "the nfs mount options for the storage pool", since = "4.19.1") + private String nfsMountOpts; + @SerializedName(ApiConstants.IS_TAG_A_RULE) @Param(description = ApiConstants.PARAMETER_DESCRIPTION_IS_TAG_A_RULE) private Boolean isTagARule; @@ -133,6 +141,10 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { @Param(description = "the storage pool capabilities") private Map caps; + @SerializedName(ApiConstants.MANAGED) + @Param(description = "whether this pool is managed or not") + private Boolean managed; + public Map getCaps() { return caps; } @@ -300,6 +312,14 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { this.allocatedIops = allocatedIops; } + public Map getCustomStats() { + return customStats; + } + + public void setCustomStats(Map customStats) { + this.customStats = customStats; + } + public String getTags() { return tags; } @@ -347,4 +367,32 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations { public void setProvider(String provider) { this.provider = provider; } + + public String getNfsMountOpts() { + return nfsMountOpts; + } + + public void setNfsMountOpts(String nfsMountOpts) { + this.nfsMountOpts = nfsMountOpts; + } + + public Long getAllocatedIops() { + return allocatedIops; + } + + public Boolean getTagARule() { + return isTagARule; + } + + public void setTagARule(Boolean tagARule) { + isTagARule = tagARule; + } + + public Boolean getManaged() { + return managed; + } + + public void setManaged(Boolean managed) { + this.managed = managed; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java index 6a2d17d28fe..dac3c0554a3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java @@ -135,6 
+135,10 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "the name of the domain to which the template belongs") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the template belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.DOMAIN_ID) @Param(description = "the ID of the domain to which the template belongs") private String domainId; @@ -179,6 +183,10 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "Lists the download progress of a template across all secondary storages") private List> downloadDetails; + @SerializedName(ApiConstants.ARCH) + @Param(description = "CPU Arch of the template", since = "4.20") + private String arch; + @SerializedName(ApiConstants.BITS) @Param(description = "the processor bit size", since = "4.10") private int bits; @@ -359,6 +367,11 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + @Override public void setDomainId(String domainId) { this.domainId = domainId; @@ -520,4 +533,8 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements public void setUserDataParams(String userDataParams) { this.userDataParams = userDataParams; } + + public void setArch(String arch) { + this.arch = arch; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/UsageRecordResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/UsageRecordResponse.java index 4522315b499..4aeded64287 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/UsageRecordResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/UsageRecordResponse.java @@ -52,6 +52,10 @@ public class UsageRecordResponse extends 
BaseResponseWithTagInformation implemen @Param(description = "the domain the resource is associated with") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the usage reocrd belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.ZONE_ID) @Param(description = "the zone ID") private String zoneId; @@ -277,6 +281,10 @@ public class UsageRecordResponse extends BaseResponseWithTagInformation implemen this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } public void setNetworkId(String networkId) { this.networkId = networkId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/UsageTypeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/UsageTypeResponse.java index 83b97f00c15..5beef5ac556 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/UsageTypeResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/UsageTypeResponse.java @@ -25,12 +25,16 @@ import com.cloud.serializer.Param; public class UsageTypeResponse extends BaseResponse { - @SerializedName("usagetypeid") - @Param(description = "usage type") + @SerializedName("id") + @Param(description = "Usage type ID") private Integer usageType; + @SerializedName(ApiConstants.NAME) + @Param(description = "Usage type name") + private String name; + @SerializedName(ApiConstants.DESCRIPTION) - @Param(description = "description of usage type") + @Param(description = "Usage type description") private String description; public String getDescription() { @@ -49,10 +53,10 @@ public class UsageTypeResponse extends BaseResponse { this.usageType = usageType; } - public UsageTypeResponse(Integer usageType, String description) { + public UsageTypeResponse(Integer usageType, String name, String description) { this.usageType = usageType; + this.name = name; this.description = description; 
setObjectName("usagetype"); } - } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/UserDataResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/UserDataResponse.java index e69094c8f80..2dfc66fa7d5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/UserDataResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/UserDataResponse.java @@ -54,6 +54,10 @@ public class UserDataResponse extends BaseResponseWithAnnotations implements Con @SerializedName(ApiConstants.DOMAIN) @Param(description="the domain name of the userdata owner") private String domain; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the userdata owner belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.USER_DATA) @Param(description="base64 encoded userdata content") private String userData; @@ -143,4 +147,9 @@ public class UserDataResponse extends BaseResponseWithAnnotations implements Con public void setDomainName(String domain) { this.domain = domain; } + + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java index 5a0ea77a4e7..1f4b493fba2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java @@ -37,6 +37,7 @@ import com.cloud.serializer.Param; import com.cloud.uservm.UserVm; import com.cloud.vm.VirtualMachine; import com.google.gson.annotations.SerializedName; +import org.apache.commons.collections.CollectionUtils; @SuppressWarnings("unused") @EntityReference(value = {VirtualMachine.class, UserVm.class, VirtualRouter.class}) @@ -81,6 +82,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co @Param(description 
= "the name of the domain in which the virtual machine exists") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain in which the virtual machine exists", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.CREATED) @Param(description = "the date when this virtual machine was created") private Date created; @@ -273,6 +278,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co @Param(description = "the hypervisor on which the template runs") private String hypervisor; + @SerializedName(ApiConstants.IP_ADDRESS) + @Param(description = "the VM's primary IP address") + private String ipAddress; + @SerializedName(ApiConstants.PUBLIC_IP_ID) @Param(description = "public IP address id associated with vm via Static nat rule") private String publicIpId; @@ -311,6 +320,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co @Param(description = "true if vm contains XS/VMWare tools inorder to support dynamic scaling of VM cpu/memory.") private Boolean isDynamicallyScalable; + @SerializedName(ApiConstants.DELETE_PROTECTION) + @Param(description = "true if vm has delete protection.", since = "4.20.0") + private boolean deleteProtection; + @SerializedName(ApiConstants.SERVICE_STATE) @Param(description = "State of the Service from LB rule") private String serviceState; @@ -379,6 +392,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co @Param(description = "VNF details", since = "4.19.0") private Map vnfDetails; + @SerializedName((ApiConstants.VM_TYPE)) + @Param(description = "User VM type", since = "4.20.0") + private String vmType; + public UserVmResponse() { securityGroupList = new LinkedHashSet<>(); nics = new TreeSet<>(Comparator.comparingInt(x -> Integer.parseInt(x.getDeviceId()))); @@ -627,6 +644,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co return 
hypervisor; } + public String getIpAddress() { + return ipAddress; + } + public String getPublicIpId() { return publicIpId; } @@ -694,6 +715,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } public void setCreated(Date created) { this.created = created; } @@ -863,6 +888,13 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co public void setNics(Set nics) { this.nics = nics; + setIpAddress(nics); + } + + public void setIpAddress(final Set nics) { + if (CollectionUtils.isNotEmpty(nics)) { + this.ipAddress = nics.iterator().next().getIpaddress(); + } } public void addNic(NicResponse nic) { @@ -967,6 +999,14 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co isDynamicallyScalable = dynamicallyScalable; } + public boolean isDeleteProtection() { + return deleteProtection; + } + + public void setDeleteProtection(boolean deleteProtection) { + this.deleteProtection = deleteProtection; + } + public String getOsTypeId() { return osTypeId; } @@ -1117,4 +1157,16 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co } this.vnfDetails.put(key,value); } + + public void setVmType(String vmType) { + this.vmType = vmType; + } + + public String getVmType() { + return vmType; + } + + public void setIpAddress(String ipAddress) { + this.ipAddress = ipAddress; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VMSnapshotResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VMSnapshotResponse.java index 9b553ed0744..703f27b537c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VMSnapshotResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VMSnapshotResponse.java @@ -108,6 +108,10 @@ public class VMSnapshotResponse extends BaseResponseWithTagInformation implement 
@Param(description = "the domain associated with the disk volume") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the disk volume belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.HYPERVISOR) @Param(description = "the type of hypervisor on which snapshot is stored") private String hypervisor; @@ -261,6 +265,11 @@ public class VMSnapshotResponse extends BaseResponseWithTagInformation implement this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setTags(Set tags) { this.tags = tags; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VMUserDataResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VMUserDataResponse.java index 1b739e56442..cf819491c2c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VMUserDataResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VMUserDataResponse.java @@ -30,7 +30,7 @@ public class VMUserDataResponse extends BaseResponse { private String vmId; @SerializedName(ApiConstants.USER_DATA) - @Param(description = "Base 64 encoded VM user data") + @Param(description = "Base64 encoded VM user data") private String userData; public void setUserData(String userData) { diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VirtualMachineResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VirtualMachineResponse.java new file mode 100644 index 00000000000..7d676292b8a --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/VirtualMachineResponse.java @@ -0,0 +1,124 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.serializer.Param; +import com.cloud.vm.VirtualMachine; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = VirtualMachine.class) +public class VirtualMachineResponse extends BaseResponse { + @SerializedName("id") + @Param(description = "the ID of the VM") + private String id; + + @SerializedName("type") + @Param(description = "the type of VM") + private String type; + + @SerializedName("name") + @Param(description = "the name of the VM") + private String name; + + @SerializedName("clusterid") + @Param(description = "the cluster ID for the VM") + private String clusterId; + + @SerializedName("clustername") + @Param(description = "the cluster name for the VM") + private String clusterName; + + @SerializedName("hostid") + @Param(description = "the host ID for the VM") + private String hostId; + + @SerializedName("hostname") + @Param(description = "the hostname for the VM") + private String hostName; + + @Override + public String getObjectId() { + return this.getId(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getVmType() { + return type; + } + + public void setVmType(String type) { + this.type = type; + } + + public 
String getVmName() { + return name; + } + + public void setVmName(String name) { + this.name = name; + } + + public String getClusterId() { + return clusterId; + } + + public void setClusterId(String clusterId) { + this.clusterId = clusterId; + } + + public String getClusterName() { + return clusterName; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getHostId() { + return hostId; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } + + public String getHostName() { + return hostName; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VirtualRouterProviderResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VirtualRouterProviderResponse.java index c3b46710f6f..b9d18e23810 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VirtualRouterProviderResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VirtualRouterProviderResponse.java @@ -60,6 +60,10 @@ public class VirtualRouterProviderResponse extends BaseResponse implements Contr @Param(description = "the domain associated with the provider") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the provider belongs", since = "4.19.2.0") + private String domainPath; + @Override public void setAccountName(String accountName) { this.accountName = accountName; @@ -79,6 +83,10 @@ public class VirtualRouterProviderResponse extends BaseResponse implements Contr this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git 
a/api/src/main/java/org/apache/cloudstack/api/response/VlanIpRangeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VlanIpRangeResponse.java index aac6dd3c577..1492c23e882 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VlanIpRangeResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VlanIpRangeResponse.java @@ -55,6 +55,10 @@ public class VlanIpRangeResponse extends BaseResponse implements ControlledEntit @Param(description = "the domain name of the VLAN IP range") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the VLAN IP range belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.POD_ID) @Param(description = "the Pod ID for the VLAN IP range") private String podId; @@ -166,6 +170,11 @@ public class VlanIpRangeResponse extends BaseResponse implements ControlledEntit this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setPodId(String podId) { this.podId = podId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java index 0d502a6d7a7..209ca57c50d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java @@ -93,7 +93,7 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co @Param(description = "display name of the virtual machine") private String virtualMachineDisplayName; - @SerializedName("vmstate") + @SerializedName(ApiConstants.VIRTUAL_MACHINE_STATE) @Param(description = "state of the virtual machine") private String virtualMachineState; @@ -145,6 +145,10 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co @Param(description = "the 
domain associated with the disk volume") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the Domain the disk volume belongs to", since = "4.19.2.0") + private String domainPath; + @SerializedName("storagetype") @Param(description = "shared or local storage") private String storageType; @@ -211,7 +215,7 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co @SerializedName("destroyed") @Param(description = "the boolean state of whether the volume is destroyed or not") - private Boolean destroyed; + private boolean destroyed; @SerializedName(ApiConstants.SERVICE_OFFERING_ID) @Param(description = "ID of the service offering for root disk") @@ -227,7 +231,7 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co @SerializedName("isextractable") @Param(description = "true if the volume is extractable, false otherwise") - private Boolean extractable; + private boolean extractable; @SerializedName(ApiConstants.STATUS) @Param(description = "the status of the volume") @@ -235,7 +239,7 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co @SerializedName(ApiConstants.DISPLAY_VOLUME) @Param(description = "an optional field whether to the display the volume to the end user or not.", authorized = {RoleType.Admin}) - private Boolean displayVolume; + private boolean displayVolume; @SerializedName(ApiConstants.PATH) @Param(description = "the path of the volume") @@ -257,12 +261,16 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co @Param(description = "true if storage snapshot is supported for the volume, false otherwise", since = "4.16") private boolean supportsStorageSnapshot; + @SerializedName(ApiConstants.DELETE_PROTECTION) + @Param(description = "true if volume has delete protection.", since = "4.20.0") + private boolean deleteProtection; + @SerializedName(ApiConstants.PHYSICAL_SIZE) - @Param(description 
= "the bytes allocated") + @Param(description = "the bytes actually consumed on disk") private Long physicalsize; @SerializedName(ApiConstants.VIRTUAL_SIZE) - @Param(description = "the bytes actually consumed on disk") + @Param(description = "the bytes allocated") private Long virtualsize; @SerializedName(ApiConstants.UTILIZATION) @@ -290,13 +298,17 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co private String externalUuid; @SerializedName(ApiConstants.VOLUME_CHECK_RESULT) - @Param(description = "details for the volume check result, they may vary for different hypervisors, since = 4.19.1") + @Param(description = "details for the volume check result, they may vary for different hypervisors", since = "4.19.1") private Map volumeCheckResult; @SerializedName(ApiConstants.VOLUME_REPAIR_RESULT) - @Param(description = "details for the volume repair result, they may vary for different hypervisors, since = 4.19.1") + @Param(description = "details for the volume repair result, they may vary for different hypervisors", since = "4.19.1") private Map volumeRepairResult; + @SerializedName(ApiConstants.ENCRYPT_FORMAT) + @Param(description = "the format of the disk encryption if applicable", since = "4.19.1") + private String encryptionFormat; + public String getPath() { return path; } @@ -314,11 +326,11 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co return this.getId(); } - public Boolean isDestroyed() { + public boolean isDestroyed() { return destroyed; } - public void setDestroyed(Boolean destroyed) { + public void setDestroyed(boolean destroyed) { this.destroyed = destroyed; } @@ -405,6 +417,11 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co this.domainName = domainName; } + @Override + public void setDomainPath(String domainPath) { + this.domainPath = domainPath; + } + public void setStorageType(String storageType) { this.storageType = storageType; } @@ -517,7 +534,7 
@@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co this.serviceOfferingDisplayText = serviceOfferingDisplayText; } - public void setExtractable(Boolean extractable) { + public void setExtractable(boolean extractable) { this.extractable = extractable; } @@ -535,7 +552,7 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co this.projectName = projectName; } - public void setDisplayVolume(Boolean displayVm) { + public void setDisplayVolume(boolean displayVm) { this.displayVolume = displayVm; } @@ -571,6 +588,14 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co return this.supportsStorageSnapshot; } + public boolean isDeleteProtection() { + return deleteProtection; + } + + public void setDeleteProtection(boolean deleteProtection) { + this.deleteProtection = deleteProtection; + } + public String getIsoId() { return isoId; } @@ -751,7 +776,7 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co return serviceOfferingDisplayText; } - public Boolean getExtractable() { + public boolean isExtractable() { return extractable; } @@ -759,7 +784,7 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co return status; } - public Boolean getDisplayVolume() { + public boolean isDisplayVolume() { return displayVolume; } @@ -842,4 +867,8 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co public void setVolumeRepairResult(Map volumeRepairResult) { this.volumeRepairResult = volumeRepairResult; } + + public void setEncryptionFormat(String encryptionFormat) { + this.encryptionFormat = encryptionFormat; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VpcOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VpcOfferingResponse.java index ce00827f06d..b11764da7d9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VpcOfferingResponse.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/response/VpcOfferingResponse.java @@ -70,9 +70,9 @@ public class VpcOfferingResponse extends BaseResponse { @Param(description = "true if vpc offering can be used by NSX networks only") private Boolean forNsx; - @SerializedName(ApiConstants.NSX_MODE) - @Param(description = "Mode in which the network will operate. This parameter is only relevant for NSX offerings") - private String nsxMode; + @SerializedName(ApiConstants.NETWORK_MODE) + @Param(description = "Mode in which the network will operate. The valid values are NATTED and ROUTED") + private String networkMode; @SerializedName(ApiConstants.DOMAIN_ID) @Param(description = "the domain ID(s) this disk offering belongs to. Ignore this information as it is not currently applicable.") @@ -94,6 +94,14 @@ public class VpcOfferingResponse extends BaseResponse { @Param(description = "the internet protocol of the vpc offering") private String internetProtocol; + @SerializedName(ApiConstants.SPECIFY_AS_NUMBER) + @Param(description = "true if network offering supports choosing AS numbers") + private Boolean specifyAsNumber; + + @SerializedName(ApiConstants.ROUTING_MODE) + @Param(description = "the routing mode for the network offering, supported types are Static or Dynamic.") + private String routingMode; + public void setId(String id) { this.id = id; } @@ -150,8 +158,8 @@ public class VpcOfferingResponse extends BaseResponse { this.forNsx = forNsx; } - public void setNsxMode(String nsxMode) { - this.nsxMode = nsxMode; + public void setNetworkMode(String networkMode) { + this.networkMode = networkMode; } public String getZoneId() { @@ -177,4 +185,20 @@ public class VpcOfferingResponse extends BaseResponse { public void setInternetProtocol(String internetProtocol) { this.internetProtocol = internetProtocol; } + + public Boolean getSpecifyAsNumber() { + return specifyAsNumber; + } + + public void setSpecifyAsNumber(Boolean specifyAsNumber) { + this.specifyAsNumber = 
specifyAsNumber; + } + + public String getRoutingMode() { + return routingMode; + } + + public void setRoutingMode(String routingMode) { + this.routingMode = routingMode; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java index 610416d7b0e..56479506686 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VpcResponse.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.api.response; import java.util.Date; +import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -96,6 +97,10 @@ public class VpcResponse extends BaseResponseWithAnnotations implements Controll @Param(description = "the domain name of the owner") private String domain; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "the domain path of the owner", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.NETWORK) @Param(description = "the list of networks belongign to the VPC", responseObject = NetworkResponse.class) private List networks; @@ -156,6 +161,26 @@ public class VpcResponse extends BaseResponseWithAnnotations implements Controll @Param(description = "the second IPv6 DNS for the VPC", since = "4.18.0") private String ipv6Dns2; + @SerializedName(ApiConstants.IPV4_ROUTING) + @Param(description = "The IPv4 routing mode of VPC", since = "4.20.0") + private String ipv4Routing; + + @SerializedName(ApiConstants.IPV4_ROUTES) + @Param(description = "The routes for the VPC to ease adding route in upstream router", since = "4.20.0") + private Set ipv4Routes; + + @SerializedName(ApiConstants.AS_NUMBER_ID) + @Param(description = "UUID of AS NUMBER", since = "4.20.0") + private String asNumberId; + + @SerializedName(ApiConstants.AS_NUMBER) + @Param(description = "AS NUMBER", since = "4.20.0") + private Long asNumber; + + 
@SerializedName(ApiConstants.BGP_PEERS) + @Param(description = "The BGP peers for the VPC", since = "4.20.0") + private Set bgpPeers; + public void setId(final String id) { this.id = id; } @@ -209,6 +234,11 @@ public class VpcResponse extends BaseResponseWithAnnotations implements Controll domain = domainName; } + @Override + public void setDomainPath(String path) { + this.domainPath = path; + } + public void setZoneId(final String zoneId) { this.zoneId = zoneId; } @@ -270,6 +300,18 @@ public class VpcResponse extends BaseResponseWithAnnotations implements Controll this.icon = icon; } + public void setIpv4Routing(String ipv4Routing) { + this.ipv4Routing = ipv4Routing; + } + + public void setIpv4Routes(Set ipv4Routes) { + this.ipv4Routes = ipv4Routes; + } + + public void addIpv4Route(Ipv4RouteResponse ipv4Route) { + this.ipv4Routes.add(ipv4Route); + } + public void setIpv6Routes(Set ipv6Routes) { this.ipv6Routes = ipv6Routes; } @@ -297,4 +339,23 @@ public class VpcResponse extends BaseResponseWithAnnotations implements Controll public void setIpv6Dns2(String ipv6Dns2) { this.ipv6Dns2 = ipv6Dns2; } + + public void setAsNumber(long asNumber) { + this.asNumber = asNumber; + } + + public void setAsNumberId(String asNumberId) { + this.asNumberId = asNumberId; + } + + public void setBgpPeers(Set bgpPeers) { + this.bgpPeers = bgpPeers; + } + + public void addBgpPeer(BgpPeerResponse bgpPeer) { + if (this.bgpPeers == null) { + this.setBgpPeers(new LinkedHashSet<>()); + } + this.bgpPeers.add(bgpPeer); + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VpnUsersResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VpnUsersResponse.java index d3e4d941678..3247f534133 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VpnUsersResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VpnUsersResponse.java @@ -48,6 +48,10 @@ public class VpnUsersResponse extends BaseResponse implements ControlledEntityRe 
@Param(description = "the domain name of the account of the remote access vpn") private String domainName; + @SerializedName(ApiConstants.DOMAIN_PATH) + @Param(description = "path of the domain to which the remote access vpn belongs", since = "4.19.2.0") + private String domainPath; + @SerializedName(ApiConstants.PROJECT_ID) @Param(description = "the project id of the vpn") private String projectId; @@ -83,6 +87,11 @@ public class VpnUsersResponse extends BaseResponse implements ControlledEntityRe this.domainName = name; } + @Override + public void setDomainPath(String path) { + this.domainPath = path; + } + @Override public void setProjectId(String projectId) { this.projectId = projectId; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java index a898cd9d577..143dfad0eaf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ZoneResponse.java @@ -95,7 +95,7 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso @SerializedName("securitygroupsenabled") @Param(description = "true if security groups support is enabled, false otherwise") - private Boolean securityGroupsEnabled; + private boolean securityGroupsEnabled; @SerializedName("allocationstate") @Param(description = "the allocation state of the cluster") @@ -111,11 +111,11 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso @SerializedName("capacity") @Param(description = "the capacity of the Zone", responseObject = CapacityResponse.class) - private List capacitites; + private List capacities; @SerializedName(ApiConstants.LOCAL_STORAGE_ENABLED) @Param(description = "true if local storage offering enabled, false otherwise") - private Boolean localStorageEnabled; + private boolean localStorageEnabled; @SerializedName(ApiConstants.TAGS) @Param(description = "the 
list of resource tags associated with zone.", responseObject = ResourceTagResponse.class, since = "4.3") @@ -149,6 +149,14 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso @Param(description = "true, if zone is NSX enabled", since = "4.20.0") private boolean nsxEnabled = false; + @SerializedName(ApiConstants.MULTI_ARCH) + @Param(description = "true, if zone contains clusters and hosts from different CPU architectures", since = "4.20") + private boolean multiArch; + + @SerializedName(ApiConstants.ASN_RANGE) + @Param(description = "AS Number Range") + private String asnRange; + public ZoneResponse() { tags = new LinkedHashSet(); } @@ -201,7 +209,7 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso this.networkType = networkType; } - public void setSecurityGroupsEnabled(Boolean securityGroupsEnabled) { + public void setSecurityGroupsEnabled(boolean securityGroupsEnabled) { this.securityGroupsEnabled = securityGroupsEnabled; } @@ -217,15 +225,15 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso this.dhcpProvider = dhcpProvider; } - public void setCapacitites(List capacitites) { - this.capacitites = capacitites; + public void setCapacities(List capacities) { + this.capacities = capacities; } public void setDomainName(String domainName) { this.domainName = domainName; } - public void setLocalStorageEnabled(Boolean localStorageEnabled) { + public void setLocalStorageEnabled(boolean localStorageEnabled) { this.localStorageEnabled = localStorageEnabled; } @@ -312,10 +320,6 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso return networkType; } - public boolean isSecurityGroupsEnabled() { - return securityGroupsEnabled; - } - public String getAllocationState() { return allocationState; } @@ -328,12 +332,8 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso return dhcpProvider; } - public List getCapacitites() 
{ - return capacitites; - } - - public boolean isLocalStorageEnabled() { - return localStorageEnabled; + public List getCapacities() { + return capacities; } public Set getTags() { @@ -344,6 +344,30 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso return resourceDetails; } + public boolean isSecurityGroupsEnabled() { + return securityGroupsEnabled; + } + + public boolean isLocalStorageEnabled() { + return localStorageEnabled; + } + + public Boolean getAllowUserSpecifyVRMtu() { + return allowUserSpecifyVRMtu; + } + + public Integer getRouterPrivateInterfaceMaxMtu() { + return routerPrivateInterfaceMaxMtu; + } + + public Integer getRouterPublicInterfaceMaxMtu() { + return routerPublicInterfaceMaxMtu; + } + + public boolean isNsxEnabled() { + return nsxEnabled; + } + @Override public void setResourceIconResponse(ResourceIconResponse resourceIconResponse) { this.resourceIconResponse = resourceIconResponse; @@ -376,4 +400,16 @@ public class ZoneResponse extends BaseResponseWithAnnotations implements SetReso public void setNsxEnabled(boolean nsxEnabled) { this.nsxEnabled = nsxEnabled; } + + public void setMultiArch(boolean multiArch) { + this.multiArch = multiArch; + } + + public void setAsnRange(String asnRange) { + this.asnRange = asnRange; + } + + public String getAsnRange() { + return asnRange; + } } diff --git a/api/src/main/java/org/apache/cloudstack/backup/Backup.java b/api/src/main/java/org/apache/cloudstack/backup/Backup.java index f369367957d..f21f20adb33 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/Backup.java +++ b/api/src/main/java/org/apache/cloudstack/backup/Backup.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.backup; import java.util.Date; +import java.util.List; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.Identity; @@ -134,11 +135,13 @@ public interface Backup extends ControlledEntity, InternalIdentity, Identity { } long getVmId(); + long 
getBackupOfferingId(); String getExternalId(); String getType(); Date getDate(); Backup.Status getStatus(); Long getSize(); Long getProtectedSize(); + List getBackedUpVolumes(); long getZoneId(); } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java index 7b39804c738..8b45bb4ee5e 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java @@ -107,7 +107,7 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer * @param vmId * @return */ - BackupSchedule listBackupSchedule(Long vmId); + List listBackupSchedule(Long vmId); /** * Deletes VM backup schedule for a VM diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java index 9c1b14ae60f..d36dfb7360f 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java @@ -93,7 +93,7 @@ public interface BackupProvider { /** * Restore a volume from a backup */ - Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid); + Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState); /** * Returns backup metrics for a list of VMs in a zone diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupRepository.java b/api/src/main/java/org/apache/cloudstack/backup/BackupRepository.java new file mode 100644 index 00000000000..8e5c9740e69 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupRepository.java @@ -0,0 +1,34 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. 
See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. +package org.apache.cloudstack.backup; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +import java.util.Date; + +public interface BackupRepository extends InternalIdentity, Identity { + String getProvider(); + long getZoneId(); + String getName(); + String getType(); + String getAddress(); + String getMountOptions(); + Long getCapacityBytes(); + Long getUsedBytes(); + Date getCreated(); +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupRepositoryService.java b/api/src/main/java/org/apache/cloudstack/backup/BackupRepositoryService.java new file mode 100644 index 00000000000..ae71053e400 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupRepositoryService.java @@ -0,0 +1,34 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.utils.Pair; +import org.apache.cloudstack.api.command.user.backup.repository.AddBackupRepositoryCmd; +import org.apache.cloudstack.api.command.user.backup.repository.DeleteBackupRepositoryCmd; +import org.apache.cloudstack.api.command.user.backup.repository.ListBackupRepositoriesCmd; + +import java.util.List; + +public interface BackupRepositoryService { + BackupRepository addBackupRepository(AddBackupRepositoryCmd cmd); + boolean deleteBackupRepository(DeleteBackupRepositoryCmd cmd); + Pair, Integer> listBackupRepositories(ListBackupRepositoriesCmd cmd); + +} diff --git a/api/src/main/java/org/apache/cloudstack/ca/CAManager.java b/api/src/main/java/org/apache/cloudstack/ca/CAManager.java index 12a9d3d7b41..b0fb1ac73c2 100644 --- a/api/src/main/java/org/apache/cloudstack/ca/CAManager.java +++ b/api/src/main/java/org/apache/cloudstack/ca/CAManager.java @@ -77,6 +77,14 @@ public interface CAManager extends CAService, Configurable, PluggableService { "15", "The number of days before expiry of a client certificate, the validations are checked. Admins are alerted when auto-renewal is not allowed, otherwise auto-renewal is attempted.", true, ConfigKey.Scope.Cluster); + + ConfigKey CertManagementCustomSubjectAlternativeName = new ConfigKey<>("Advanced", String.class, + "ca.framework.cert.management.custom.san", + "cloudstack.internal", + "The custom Subject Alternative Name that will be added to the management server certificate. 
" + + "The actual implementation will depend on the configured CA provider.", + false); + /** * Returns a list of available CA provider plugins * @return returns list of CAProvider diff --git a/api/src/main/java/org/apache/cloudstack/datacenter/DataCenterIpv4GuestSubnet.java b/api/src/main/java/org/apache/cloudstack/datacenter/DataCenterIpv4GuestSubnet.java new file mode 100644 index 00000000000..90d55cc5751 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/datacenter/DataCenterIpv4GuestSubnet.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.datacenter; + +import java.util.Date; + +import org.apache.cloudstack.acl.InfrastructureEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +public interface DataCenterIpv4GuestSubnet extends InfrastructureEntity, InternalIdentity, Identity { + Long getDataCenterId(); + + String getSubnet(); + + Long getDomainId(); + + Long getAccountId(); + + Date getCreated(); +} diff --git a/api/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceResponse.java b/api/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceResponse.java new file mode 100644 index 00000000000..fc116cbb91e --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceResponse.java @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.dedicated; + +import com.cloud.dc.DedicatedResources; +import com.cloud.serializer.Param; + +import com.google.gson.annotations.SerializedName; + +import org.apache.cloudstack.api.BaseResponse; + +public class DedicatedResourceResponse extends BaseResponse { + @SerializedName("resourceid") + @Param(description = "the ID of the resource") + private String resourceId; + + @SerializedName("resourcename") + @Param(description = "the name of the resource") + private String resourceName; + + @SerializedName("resourcetype") + @Param(description = "the type of the resource") + private DedicatedResources.Type resourceType; + + public DedicatedResourceResponse(String resourceId, String resourceName, DedicatedResources.Type resourceType) { + this.resourceId = resourceId; + this.resourceName = resourceName; + this.resourceType = resourceType; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/network/BgpPeer.java b/api/src/main/java/org/apache/cloudstack/network/BgpPeer.java new file mode 100644 index 00000000000..e1d7eca0a03 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/network/BgpPeer.java @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.network; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +import java.util.Date; + +public interface BgpPeer extends Identity, InternalIdentity { + + Long getDomainId(); + + Long getAccountId(); + + enum State { + Active, Add, Revoke + } + + enum Detail { + EBGP_MultiHop + } + + long getDataCenterId(); + + String getIp4Address(); + + String getIp6Address(); + + Long getAsNumber(); + + String getPassword(); + + Date getCreated(); +} diff --git a/api/src/main/java/org/apache/cloudstack/network/BgpPeerTO.java b/api/src/main/java/org/apache/cloudstack/network/BgpPeerTO.java new file mode 100644 index 00000000000..b0503314616 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/network/BgpPeerTO.java @@ -0,0 +1,91 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.network; + +import java.util.Map; + +public class BgpPeerTO { + Long peerId; + Long peerAsNumber; + String ip4Address; + String ip6Address; + String peerPassword; + Long networkId; + Long networkAsNumber; + String guestIp4Cidr; + String guestIp6Cidr; + + Map details; + + public BgpPeerTO(Long peerId, String ip4Address, String ip6Address, Long peerAsNumber, String peerPassword, + Long networkId, Long networkAsNumber, String guestIp4Cidr, String guestIp6Cidr, Map details) { + this.peerId = peerId; + this.ip4Address = ip4Address; + this.ip6Address = ip6Address; + this.peerAsNumber = peerAsNumber; + this.peerPassword = peerPassword; + this.networkId = networkId; + this.networkAsNumber = networkAsNumber; + this.guestIp4Cidr = guestIp4Cidr; + this.guestIp6Cidr = guestIp6Cidr; + this.details = details; + } + + public BgpPeerTO(Long networkId) { + this.networkId = networkId; + } + + public Long getPeerId() { + return peerId; + } + + public String getIp4Address() { + return ip4Address; + } + + public String getIp6Address() { + return ip6Address; + } + + public Long getPeerAsNumber() { + return peerAsNumber; + } + + public String getPeerPassword() { + return peerPassword; + } + + public Long getNetworkId() { + return networkId; + } + + public Long getNetworkAsNumber() { + return networkAsNumber; + } + + public String getGuestIp4Cidr() { + return guestIp4Cidr; + } + + public String getGuestIp6Cidr() { + return guestIp6Cidr; + } + + public Map getDetails() { + return details; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/network/Ipv4GuestSubnetNetworkMap.java b/api/src/main/java/org/apache/cloudstack/network/Ipv4GuestSubnetNetworkMap.java new file mode 100644 index 00000000000..569ed22c164 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/network/Ipv4GuestSubnetNetworkMap.java @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.network; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +import java.util.Date; + +public interface Ipv4GuestSubnetNetworkMap extends Identity, InternalIdentity { + Date getAllocated(); + + Date getCreated(); + + enum State { + Allocating, // The subnet will be assigned to a network + Allocated, // The subnet is in use. + Releasing, // The subnet is being released. + Free // The subnet is ready to be allocated. + } + + Long getParentId(); + + String getSubnet(); + + Long getVpcId(); + + Long getNetworkId(); + + State getState(); + +} diff --git a/api/src/main/java/org/apache/cloudstack/network/RoutedIpv4Manager.java b/api/src/main/java/org/apache/cloudstack/network/RoutedIpv4Manager.java new file mode 100644 index 00000000000..2f704e9f47d --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/network/RoutedIpv4Manager.java @@ -0,0 +1,199 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.network; + +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.network.Network; +import com.cloud.network.rules.FirewallRule; +import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.VpcOffering; +import com.cloud.offering.NetworkOffering; +import com.cloud.user.Account; +import com.cloud.utils.Pair; +import com.cloud.utils.component.PluggableService; + +import org.apache.cloudstack.api.command.admin.network.CreateIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.CreateIpv4SubnetForGuestNetworkCmd; +import org.apache.cloudstack.api.command.admin.network.DedicateIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.DeleteIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.DeleteIpv4SubnetForGuestNetworkCmd; +import org.apache.cloudstack.api.command.admin.network.ListIpv4SubnetsForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.ListIpv4SubnetsForGuestNetworkCmd; +import org.apache.cloudstack.api.command.admin.network.ReleaseDedicatedIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.UpdateIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.ChangeBgpPeersForNetworkCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.ChangeBgpPeersForVpcCmd; +import 
org.apache.cloudstack.api.command.admin.network.bgp.CreateBgpPeerCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.DedicateBgpPeerCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.DeleteBgpPeerCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.ListBgpPeersCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.ReleaseDedicatedBgpPeerCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.UpdateBgpPeerCmd; +import org.apache.cloudstack.api.command.user.network.routing.CreateRoutingFirewallRuleCmd; +import org.apache.cloudstack.api.command.user.network.routing.ListRoutingFirewallRulesCmd; +import org.apache.cloudstack.api.command.user.network.routing.UpdateRoutingFirewallRuleCmd; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.Ipv4SubnetForGuestNetworkResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; + +import java.util.List; + +public interface RoutedIpv4Manager extends PluggableService, Configurable { + + ConfigKey RoutedNetworkIPv4MaxCidrSize = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK, Integer.class, + "routed.network.ipv4.max.cidr.size", "30", "The maximum value of the cidr size for isolated networks in ROUTED mode", + true, ConfigKey.Scope.Account); + + ConfigKey RoutedNetworkIPv4MinCidrSize = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK, Integer.class, + "routed.network.ipv4.min.cidr.size", "24", "The minimum value of the cidr size for isolated networks in ROUTED mode", + true, ConfigKey.Scope.Account); + + ConfigKey RoutedVpcIPv4MaxCidrSize = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK, Integer.class, + "routed.ipv4.vpc.max.cidr.size", "28", "The maximum value of the cidr size for VPC in ROUTED mode", + true, 
ConfigKey.Scope.Account); + + ConfigKey RoutedVpcIPv4MinCidrSize = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK, Integer.class, + "routed.ipv4.vpc.min.cidr.size", "22", "The minimum value of the cidr size for VPC in ROUTED mode", + true, ConfigKey.Scope.Account); + + ConfigKey RoutedIPv4NetworkCidrAutoAllocationEnabled = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK, Boolean.class, + "routed.ipv4.network.cidr.auto.allocation.enabled", + "true", + "Indicates whether the auto-allocation of network CIDR for routed network is enabled or not.", + true, + ConfigKey.Scope.Account); + + ConfigKey UseSystemBgpPeers = new ConfigKey<>(ConfigKey.CATEGORY_NETWORK, Boolean.class, + "use.system.bgp.peers", + "true", + "If true, when account has dedicated bgp peers(s), the guest networks with dynamic routing will use both system and dedicated bgp peers. If false, only dedicated bgp peers will be used.", + true, + ConfigKey.Scope.Account); + + // Methods for DataCenterIpv4GuestSubnet APIs + DataCenterIpv4GuestSubnet createDataCenterIpv4GuestSubnet(CreateIpv4SubnetForZoneCmd createIpv4SubnetForZoneCmd); + + DataCenterIpv4SubnetResponse createDataCenterIpv4SubnetResponse(DataCenterIpv4GuestSubnet result); + + boolean deleteDataCenterIpv4GuestSubnet(DeleteIpv4SubnetForZoneCmd deleteIpv4SubnetForZoneCmd); + + DataCenterIpv4GuestSubnet updateDataCenterIpv4GuestSubnet(UpdateIpv4SubnetForZoneCmd updateIpv4SubnetForZoneCmd); + + List listDataCenterIpv4GuestSubnets(ListIpv4SubnetsForZoneCmd listIpv4SubnetsForZoneCmd); + + DataCenterIpv4GuestSubnet dedicateDataCenterIpv4GuestSubnet(DedicateIpv4SubnetForZoneCmd dedicateIpv4SubnetForZoneCmd); + + DataCenterIpv4GuestSubnet releaseDedicatedDataCenterIpv4GuestSubnet(ReleaseDedicatedIpv4SubnetForZoneCmd releaseDedicatedIpv4SubnetForZoneCmd); + + // Methods for Ipv4SubnetForGuestNetwork APIs + Ipv4GuestSubnetNetworkMap createIpv4SubnetForGuestNetwork(CreateIpv4SubnetForGuestNetworkCmd createIpv4SubnetForGuestNetworkCmd); + + boolean 
deleteIpv4SubnetForGuestNetwork(DeleteIpv4SubnetForGuestNetworkCmd deleteIpv4SubnetForGuestNetworkCmd); + + void releaseIpv4SubnetForGuestNetwork(long networkId); + + void releaseIpv4SubnetForVpc(long vpcId); + + List listIpv4GuestSubnetsForGuestNetwork(ListIpv4SubnetsForGuestNetworkCmd listIpv4SubnetsForGuestNetworkCmd); + + Ipv4SubnetForGuestNetworkResponse createIpv4SubnetForGuestNetworkResponse(Ipv4GuestSubnetNetworkMap subnet); + + // Methods for internal calls + void getOrCreateIpv4SubnetForGuestNetwork(Network network, String networkCidr); + + Ipv4GuestSubnetNetworkMap getOrCreateIpv4SubnetForGuestNetwork(Long domainId, Long accountId, Long zoneId, Integer networkCidrSize); + + void getOrCreateIpv4SubnetForVpc(Vpc vpc, String networkCidr); + + Ipv4GuestSubnetNetworkMap getOrCreateIpv4SubnetForVpc(Vpc vpc, Integer vpcCidrSize); + + void assignIpv4SubnetToNetwork(Network network); + + void assignIpv4SubnetToVpc(Vpc vpc); + + // Methods for Routing firewall rules + FirewallRule createRoutingFirewallRule(CreateRoutingFirewallRuleCmd createRoutingFirewallRuleCmd) throws NetworkRuleConflictException; + + Pair, Integer> listRoutingFirewallRules(ListRoutingFirewallRulesCmd listRoutingFirewallRulesCmd); + + FirewallRule updateRoutingFirewallRule(UpdateRoutingFirewallRuleCmd updateRoutingFirewallRuleCmd); + + boolean revokeRoutingFirewallRule(Long id); + + boolean applyRoutingFirewallRule(long id); + + boolean isVirtualRouterGateway(Network network); + + boolean isVirtualRouterGateway(NetworkOffering networkOffering); + + boolean isRoutedNetwork(Network network); + + boolean isDynamicRoutedNetwork(Network network); + + boolean isDynamicRoutedNetwork(NetworkOffering networkOffering); + + boolean isRoutedVpc(Vpc vpc); + + boolean isVpcVirtualRouterGateway(VpcOffering vpcOffering); + + BgpPeer createBgpPeer(CreateBgpPeerCmd createBgpPeerCmd); + + BgpPeerResponse createBgpPeerResponse(BgpPeer result); + + boolean deleteBgpPeer(DeleteBgpPeerCmd deleteBgpPeerCmd); + + 
BgpPeer updateBgpPeer(UpdateBgpPeerCmd updateBgpPeerCmd); + + BgpPeer dedicateBgpPeer(DedicateBgpPeerCmd dedicateBgpPeerCmd); + + BgpPeer releaseDedicatedBgpPeer(ReleaseDedicatedBgpPeerCmd releaseDedicatedBgpPeerCmd); + + List listBgpPeers(ListBgpPeersCmd listBgpPeersCmd); + + Network changeBgpPeersForNetwork(ChangeBgpPeersForNetworkCmd changeBgpPeersForNetworkCmd); + + Network removeBgpPeersFromNetwork(Network network); + + void validateBgpPeers(Account owner, Long zoneId, List bgpPeerIds); + + void persistBgpPeersForGuestNetwork(long networkId, List bgpPeerIds); + + void releaseBgpPeersForGuestNetwork(long networkId); + + boolean isDynamicRoutedVpc(Vpc vpc); + + boolean isDynamicRoutedVpc(VpcOffering vpcOff); + + void persistBgpPeersForVpc(long vpcId, List bgpPeerIds); + + void releaseBgpPeersForVpc(long vpcId); + + Vpc changeBgpPeersForVpc(ChangeBgpPeersForVpcCmd changeBgpPeersForVpcCmd); + + List getBgpPeerIdsForAccount(Account owner, long zoneIdd); + + void removeIpv4SubnetsForZoneByAccountId(long accountId); + + void removeIpv4SubnetsForZoneByDomainId(long domainId); + + void removeBgpPeersByAccountId(long accountId); + + void removeBgpPeersByDomainId(long domainId); +} diff --git a/api/src/main/java/org/apache/cloudstack/query/QueryService.java b/api/src/main/java/org/apache/cloudstack/query/QueryService.java index 3299e7537a2..c93e43d9f37 100644 --- a/api/src/main/java/org/apache/cloudstack/query/QueryService.java +++ b/api/src/main/java/org/apache/cloudstack/query/QueryService.java @@ -52,6 +52,7 @@ import org.apache.cloudstack.api.command.user.snapshot.CopySnapshotCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; import org.apache.cloudstack.api.command.user.tag.ListTagsCmd; import org.apache.cloudstack.api.command.user.template.ListTemplatesCmd; +import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd; import org.apache.cloudstack.api.command.user.vm.ListVMsCmd; import 
org.apache.cloudstack.api.command.user.vmgroup.ListVMGroupsCmd; import org.apache.cloudstack.api.command.user.volume.ListResourceDetailsCmd; @@ -89,6 +90,7 @@ import org.apache.cloudstack.api.response.StorageTagResponse; import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.api.response.VirtualMachineResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.framework.config.ConfigKey; @@ -125,6 +127,9 @@ public interface QueryService { static final ConfigKey SharePublicTemplatesWithOtherDomains = new ConfigKey<>("Advanced", Boolean.class, "share.public.templates.with.other.domains", "true", "If false, templates of this domain will not show up in the list templates of other domains.", true, ConfigKey.Scope.Domain); + ConfigKey ReturnVmStatsOnVmList = new ConfigKey<>("Advanced", Boolean.class, "list.vm.default.details.stats", "true", + "Determines whether VM stats should be returned when details are not explicitly specified in listVirtualMachines API request. When false, details default to [group, nics, secgrp, tmpl, servoff, diskoff, backoff, iso, volume, min, affgrp]. 
When true, all details are returned including 'stats'.", true, ConfigKey.Scope.Global); + ListResponse searchForUsers(ListUsersCmd cmd) throws PermissionDeniedException; ListResponse searchForUsers(Long domainId, boolean recursive) throws PermissionDeniedException; @@ -137,6 +142,8 @@ public interface QueryService { ListResponse searchForUserVMs(ListVMsCmd cmd); + ListResponse listAffectedVmsForStorageScopeChange(ListAffectedVmsForStorageScopeChangeCmd cmd); + ListResponse searchForSecurityGroups(ListSecurityGroupsCmd cmd); ListResponse searchForRouters(ListRoutersCmd cmd); diff --git a/api/src/main/java/org/apache/cloudstack/resource/ResourceCleanupService.java b/api/src/main/java/org/apache/cloudstack/resource/ResourceCleanupService.java new file mode 100644 index 00000000000..0d72edb0748 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/resource/ResourceCleanupService.java @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.resource; + +import org.apache.cloudstack.api.command.admin.resource.PurgeExpungedResourcesCmd; +import org.apache.cloudstack.framework.config.ConfigKey; + +import com.cloud.vm.VirtualMachine; + +public interface ResourceCleanupService { + int MINIMUM_EXPUNGED_RESOURCE_PURGE_JOB_DELAY_IN_SECONDS = 3 * 60; + ConfigKey ExpungedResourcePurgeEnabled = new ConfigKey<>("Advanced", Boolean.class, + "expunged.resources.purge.enabled", "false", + "Whether to run a background task to purge the DB records of the expunged resources", + false, ConfigKey.Scope.Global); + ConfigKey ExpungedResourcePurgeResources = new ConfigKey<>("Advanced", String.class, + "expunged.resources.purge.resources", "", + "A comma-separated list of resource types that will be considered by the background task " + + "to purge the DB records of the expunged resources. Currently only VirtualMachine is supported. " + + "An empty value will result in considering all resource types for purging", + false, ConfigKey.Scope.Global); + ConfigKey ExpungedResourcesPurgeInterval = new ConfigKey<>("Advanced", Integer.class, + "expunged.resources.purge.interval", "86400", + "Interval (in seconds) for the background task to purge the DB records of the expunged resources", + false); + ConfigKey ExpungedResourcesPurgeDelay = new ConfigKey<>("Advanced", Integer.class, + "expunged.resources.purge.delay", "300", + "Initial delay (in seconds) to start the background task to purge the DB records of the " + + "expunged resources task", false); + ConfigKey ExpungedResourcesPurgeBatchSize = new ConfigKey<>("Advanced", Integer.class, + "expunged.resources.purge.batch.size", "50", + "Batch size to be used during purging of the DB records of the expunged resources", + true); + ConfigKey ExpungedResourcesPurgeStartTime = new ConfigKey<>("Advanced", String.class, + "expunged.resources.purge.start.time", "", + "Start time to be used by the background task to purge the DB records of the expunged " + + 
"resources. Use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\"", true); + ConfigKey ExpungedResourcesPurgeKeepPastDays = new ConfigKey<>("Advanced", Integer.class, + "expunged.resources.purge.keep.past.days", "30", + "The number of days in the past from the execution time of the background task to purge " + + "the DB records of the expunged resources for which the expunged resources must not be purged. " + + "To enable purging DB records of the expunged resource till the execution of the background " + + "task, set the value to zero.", true); + ConfigKey ExpungedResourcePurgeJobDelay = new ConfigKey<>("Advanced", Integer.class, + "expunged.resource.purge.job.delay", + String.valueOf(MINIMUM_EXPUNGED_RESOURCE_PURGE_JOB_DELAY_IN_SECONDS), + String.format("Delay (in seconds) to execute the purging of the DB records of an expunged resource " + + "initiated by the configuration in the offering. Minimum value should be %d seconds " + + "and if a lower value is set then the minimum value will be used", + MINIMUM_EXPUNGED_RESOURCE_PURGE_JOB_DELAY_IN_SECONDS), + true); + + enum ResourceType { + VirtualMachine + } + + long purgeExpungedResources(PurgeExpungedResourcesCmd cmd); + void purgeExpungedVmResourcesLaterIfNeeded(VirtualMachine vm); +} diff --git a/api/src/main/java/org/apache/cloudstack/storage/browser/DataStoreObjectResponse.java b/api/src/main/java/org/apache/cloudstack/storage/browser/DataStoreObjectResponse.java index cac5cc91b03..c281fa115fd 100644 --- a/api/src/main/java/org/apache/cloudstack/storage/browser/DataStoreObjectResponse.java +++ b/api/src/main/java/org/apache/cloudstack/storage/browser/DataStoreObjectResponse.java @@ -41,6 +41,10 @@ public class DataStoreObjectResponse extends BaseResponse { @Param(description = "Template ID associated with the data store object.") private String templateId; + @SerializedName(ApiConstants.TEMPLATE_NAME) + @Param(description = "Template Name associated with the data store object.") + private String templateName; + 
@SerializedName(ApiConstants.FORMAT) @Param(description = "Format of template associated with the data store object.") private String format; @@ -49,10 +53,18 @@ public class DataStoreObjectResponse extends BaseResponse { @Param(description = "Snapshot ID associated with the data store object.") private String snapshotId; + @SerializedName("snapshotname") + @Param(description = "Snapshot Name associated with the data store object.") + private String snapshotName; + @SerializedName(ApiConstants.VOLUME_ID) @Param(description = "Volume ID associated with the data store object.") private String volumeId; + @SerializedName(ApiConstants.VOLUME_NAME) + @Param(description = "Volume Name associated with the data store object.") + private String volumeName; + @SerializedName(ApiConstants.LAST_UPDATED) @Param(description = "Last modified date of the file/directory.") private Date lastUpdated; @@ -86,6 +98,18 @@ public class DataStoreObjectResponse extends BaseResponse { this.volumeId = volumeId; } + public void setTemplateName(String templateName) { + this.templateName = templateName; + } + + public void setSnapshotName(String snapshotName) { + this.snapshotName = snapshotName; + } + + public void setVolumeName(String volumeName) { + this.volumeName = volumeName; + } + public String getName() { return name; } @@ -117,4 +141,16 @@ public class DataStoreObjectResponse extends BaseResponse { public Date getLastUpdated() { return lastUpdated; } + + public String getTemplateName() { + return templateName; + } + + public String getSnapshotName() { + return snapshotName; + } + + public String getVolumeName() { + return volumeName; + } } diff --git a/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFS.java b/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFS.java new file mode 100644 index 00000000000..bcba425abbf --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFS.java @@ -0,0 +1,189 @@ +// Licensed to the Apache 
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.sharedfs; + +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.utils.fsm.StateObject; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.framework.config.ConfigKey; + +import java.util.Date; + +public interface SharedFS extends ControlledEntity, Identity, InternalIdentity, StateObject { + + static final ConfigKey SharedFSFeatureEnabled = new ConfigKey("Advanced", Boolean.class, + "sharedfs.feature.enabled", + "true", + " Indicates whether the Shared FileSystem feature is enabled or not. 
Management server restart needed on change", + false); + + ConfigKey SharedFSCleanupInterval = new ConfigKey<>(Integer.class, + "sharedfs.cleanup.interval", + "Advanced", + "14400", + "The interval (in seconds) to wait before running the shared filesystem cleanup thread.", + false, + ConfigKey.Scope.Global, + null, + SharedFSFeatureEnabled.key()); + + ConfigKey SharedFSCleanupDelay = new ConfigKey<>(Integer.class, + "sharedfs.cleanup.delay", + "Advanced", + "86400", + "Determines how long (in seconds) to wait before actually expunging destroyed shared filesystem.", + false, + ConfigKey.Scope.Global, + null, + SharedFSFeatureEnabled.key()); + + ConfigKey SharedFSExpungeWorkers = new ConfigKey<>(Integer.class, + "sharedfs.expunge.workers", + "Advanced", + "2", + "Determines how many threads are created to do the work of expunging destroyed shared filesystem.", + false, + ConfigKey.Scope.Global, + null, + SharedFSFeatureEnabled.key()); + + String SharedFSVmNamePrefix = "sharedfs"; + String SharedFSPath = "/export"; + + enum FileSystemType { + EXT4, XFS + } + + enum Protocol { + NFS + } + + enum State { + Allocated(false, "The shared filesystem is allocated in db but hasn't been created or started yet."), + Ready(false, "The shared filesystem is ready to use."), + Stopping(true, "The shared filesystem is being stopped"), + Stopped(false, "The shared filesystem is in stopped state. 
It can not be used but the data is still there."), + Starting(true, "The shared filesystem is being started."), + Destroyed(false, "The shared filesystem is destroyed."), + Expunging(true, "The shared filesystem is being expunged."), + Expunged(false, "The shared filesystem has been expunged."), + Error(false, "The shared filesystem is in error state."); + + boolean _transitional; + String _description; + + /** + * SharedFS State + * + * @param transitional true for transition/non-final state, otherwise false + * @param description description of the state + */ + State(boolean transitional, String description) { + _transitional = transitional; + _description = description; + } + + public boolean isTransitional() { + return _transitional; + } + + public String getDescription() { + return _description; + } + + private final static StateMachine2 s_fsm = new StateMachine2(); + + public static StateMachine2 getStateMachine() { + return s_fsm; + } + + static { + s_fsm.addTransition(new StateMachine2.Transition(Allocated, Event.OperationFailed, Error, null)); + s_fsm.addTransition(new StateMachine2.Transition(Allocated, Event.OperationSucceeded, Ready, null)); + s_fsm.addTransition(new StateMachine2.Transition(Error, Event.DestroyRequested, Destroyed, null)); + s_fsm.addTransition(new StateMachine2.Transition(Stopped, Event.StartRequested, Starting, null)); + s_fsm.addTransition(new StateMachine2.Transition(Starting, Event.OperationSucceeded, Ready, null)); + s_fsm.addTransition(new StateMachine2.Transition(Starting, Event.OperationFailed, Stopped, null)); + s_fsm.addTransition(new StateMachine2.Transition(Ready, Event.StopRequested, Stopping, null)); + s_fsm.addTransition(new StateMachine2.Transition(Stopping, Event.OperationSucceeded, Stopped, null)); + s_fsm.addTransition(new StateMachine2.Transition(Stopping, Event.OperationFailed, Ready, null)); + s_fsm.addTransition(new StateMachine2.Transition(Stopped, Event.DestroyRequested, Destroyed, null)); + 
s_fsm.addTransition(new StateMachine2.Transition(Destroyed, Event.RecoveryRequested, Stopped, null)); + s_fsm.addTransition(new StateMachine2.Transition(Destroyed, Event.ExpungeOperation, Expunging, null)); + s_fsm.addTransition(new StateMachine2.Transition(Error, Event.ExpungeOperation, Expunging, null)); + s_fsm.addTransition(new StateMachine2.Transition(Expunging, Event.ExpungeOperation, Expunging, null)); + s_fsm.addTransition(new StateMachine2.Transition(Expunging, Event.OperationSucceeded, Expunged, null)); + } + } + + enum Event { + StopRequested, + StartRequested, + DestroyRequested, + OperationSucceeded, + OperationFailed, + ExpungeOperation, + RecoveryRequested, + } + + static String getSharedFSPath() { + return SharedFSPath; + } + + long getId(); + + String getName(); + + void setName(String name); + + String getUuid(); + + String getDescription(); + + void setDescription(String description); + + Long getDataCenterId(); + + State getState(); + + String getFsProviderName(); + + Protocol getProtocol(); + + Long getVolumeId(); + + void setVolumeId(Long volumeId); + + Long getVmId(); + + void setVmId(Long vmId); + + FileSystemType getFsType(); + + Long getServiceOfferingId(); + + void setServiceOfferingId(Long serviceOfferingId); + + Date getUpdated(); + + public long getUpdatedCount(); + + public void incrUpdatedCount(); +} diff --git a/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSLifeCycle.java b/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSLifeCycle.java new file mode 100644 index 00000000000..552dcf79f78 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSLifeCycle.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.sharedfs; + +import com.cloud.dc.DataCenter; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.VirtualMachineMigrationException; +import com.cloud.utils.Pair; + +public interface SharedFSLifeCycle { + void checkPrerequisites(DataCenter zone, Long serviceOfferingId); + + Pair deploySharedFS(SharedFS sharedFS, Long networkId, Long diskOfferingId, Long size, Long minIops, Long maxIops) throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, OperationTimedoutException; + + void startSharedFS(SharedFS sharedFS) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException; + + boolean stopSharedFS(SharedFS sharedFS, Boolean forced); + + boolean deleteSharedFS(SharedFS sharedFS); + + boolean reDeploySharedFS(SharedFS sharedFS) throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, OperationTimedoutException; + + boolean changeSharedFSServiceOffering(SharedFS sharedFS, Long serviceOfferingId) throws ManagementServerException, ResourceUnavailableException, 
VirtualMachineMigrationException; +} diff --git a/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSProvider.java b/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSProvider.java new file mode 100644 index 00000000000..3966970f188 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSProvider.java @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.sharedfs; + +import com.cloud.utils.component.Adapter; + +public interface SharedFSProvider extends Adapter { + + enum SharedFSProviderType { + SHAREDFSVM + } + + void configure(); + + SharedFSLifeCycle getSharedFSLifeCycle(); +} diff --git a/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSService.java b/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSService.java new file mode 100644 index 00000000000..21184de27a2 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSService.java @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.sharedfs; + +import java.util.List; + +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ChangeSharedFSDiskOfferingCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ChangeSharedFSServiceOfferingCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.CreateSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.DestroySharedFSCmd; + +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.VirtualMachineMigrationException; + +import org.apache.cloudstack.api.command.user.storage.sharedfs.ListSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.UpdateSharedFSCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.ListResponse; + +public interface SharedFSService { + + List getSharedFSProviders(); + + boolean stateTransitTo(SharedFS sharedFS, SharedFS.Event event); + + void setSharedFSProviders(List 
sharedFSProviders); + + SharedFSProvider getSharedFSProvider(String sharedFSProviderName); + + SharedFS allocSharedFS(CreateSharedFSCmd cmd); + + SharedFS deploySharedFS(CreateSharedFSCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, OperationTimedoutException; + + SharedFS startSharedFS(Long sharedFSId) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException; + + SharedFS stopSharedFS(Long sharedFSId, Boolean forced); + + SharedFS restartSharedFS(Long sharedFSId, boolean cleanup) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException; + + ListResponse searchForSharedFS(ResponseObject.ResponseView respView, ListSharedFSCmd cmd); + + SharedFS updateSharedFS(UpdateSharedFSCmd cmd); + + SharedFS changeSharedFSDiskOffering(ChangeSharedFSDiskOfferingCmd cmd) throws ResourceAllocationException; + + SharedFS changeSharedFSServiceOffering(ChangeSharedFSServiceOfferingCmd cmd) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ManagementServerException, VirtualMachineMigrationException; + + Boolean destroySharedFS(DestroySharedFSCmd cmd); + + SharedFS recoverSharedFS(Long sharedFSId); + + void deleteSharedFS(Long sharedFSId); +} diff --git a/api/src/main/java/org/apache/cloudstack/usage/UsageService.java b/api/src/main/java/org/apache/cloudstack/usage/UsageService.java index 73962ba5875..00e8b431f8f 100644 --- a/api/src/main/java/org/apache/cloudstack/usage/UsageService.java +++ b/api/src/main/java/org/apache/cloudstack/usage/UsageService.java @@ -20,7 +20,6 @@ import com.cloud.utils.Pair; import org.apache.cloudstack.api.command.admin.usage.GenerateUsageRecordsCmd; import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd; import org.apache.cloudstack.api.command.admin.usage.RemoveRawUsageRecordsCmd; -import 
org.apache.cloudstack.api.response.UsageTypeResponse; import java.util.List; import java.util.TimeZone; @@ -62,6 +61,4 @@ public interface UsageService { TimeZone getUsageTimezone(); boolean removeRawUsageRecords(RemoveRawUsageRecordsCmd cmd); - - List listUsageTypes(); } diff --git a/api/src/main/java/org/apache/cloudstack/usage/UsageTypes.java b/api/src/main/java/org/apache/cloudstack/usage/UsageTypes.java index 32ae34056ec..5ad360a8026 100644 --- a/api/src/main/java/org/apache/cloudstack/usage/UsageTypes.java +++ b/api/src/main/java/org/apache/cloudstack/usage/UsageTypes.java @@ -51,31 +51,31 @@ public class UsageTypes { public static List listUsageTypes() { List responseList = new ArrayList(); - responseList.add(new UsageTypeResponse(RUNNING_VM, "Running Vm Usage")); - responseList.add(new UsageTypeResponse(ALLOCATED_VM, "Allocated Vm Usage")); - responseList.add(new UsageTypeResponse(IP_ADDRESS, "IP Address Usage")); - responseList.add(new UsageTypeResponse(NETWORK_BYTES_SENT, "Network Usage (Bytes Sent)")); - responseList.add(new UsageTypeResponse(NETWORK_BYTES_RECEIVED, "Network Usage (Bytes Received)")); - responseList.add(new UsageTypeResponse(VOLUME, "Volume Usage")); - responseList.add(new UsageTypeResponse(TEMPLATE, "Template Usage")); - responseList.add(new UsageTypeResponse(ISO, "ISO Usage")); - responseList.add(new UsageTypeResponse(SNAPSHOT, "Snapshot Usage")); - responseList.add(new UsageTypeResponse(SECURITY_GROUP, "Security Group Usage")); - responseList.add(new UsageTypeResponse(LOAD_BALANCER_POLICY, "Load Balancer Usage")); - responseList.add(new UsageTypeResponse(PORT_FORWARDING_RULE, "Port Forwarding Usage")); - responseList.add(new UsageTypeResponse(NETWORK_OFFERING, "Network Offering Usage")); - responseList.add(new UsageTypeResponse(VPN_USERS, "VPN users usage")); - responseList.add(new UsageTypeResponse(VM_DISK_IO_READ, "VM Disk usage(I/O Read)")); - responseList.add(new UsageTypeResponse(VM_DISK_IO_WRITE, "VM Disk usage(I/O Write)")); - 
responseList.add(new UsageTypeResponse(VM_DISK_BYTES_READ, "VM Disk usage(Bytes Read)")); - responseList.add(new UsageTypeResponse(VM_DISK_BYTES_WRITE, "VM Disk usage(Bytes Write)")); - responseList.add(new UsageTypeResponse(VM_SNAPSHOT, "VM Snapshot storage usage")); - responseList.add(new UsageTypeResponse(VOLUME_SECONDARY, "Volume on secondary storage usage")); - responseList.add(new UsageTypeResponse(VM_SNAPSHOT_ON_PRIMARY, "VM Snapshot on primary storage usage")); - responseList.add(new UsageTypeResponse(BACKUP, "Backup storage usage")); - responseList.add(new UsageTypeResponse(BUCKET, "Bucket storage usage")); - responseList.add(new UsageTypeResponse(NETWORK, "Network usage")); - responseList.add(new UsageTypeResponse(VPC, "VPC usage")); + responseList.add(new UsageTypeResponse(RUNNING_VM, "RUNNING_VM", "Running Vm Usage")); + responseList.add(new UsageTypeResponse(ALLOCATED_VM, "ALLOCATED_VM", "Allocated Vm Usage")); + responseList.add(new UsageTypeResponse(IP_ADDRESS, "IP_ADDRESS", "IP Address Usage")); + responseList.add(new UsageTypeResponse(NETWORK_BYTES_SENT, "NETWORK_BYTES_SENT", "Network Usage (Bytes Sent)")); + responseList.add(new UsageTypeResponse(NETWORK_BYTES_RECEIVED, "NETWORK_BYTES_RECEIVED", "Network Usage (Bytes Received)")); + responseList.add(new UsageTypeResponse(VOLUME, "VOLUME", "Volume Usage")); + responseList.add(new UsageTypeResponse(TEMPLATE, "TEMPLATE", "Template Usage")); + responseList.add(new UsageTypeResponse(ISO, "ISO", "ISO Usage")); + responseList.add(new UsageTypeResponse(SNAPSHOT, "SNAPSHOT", "Snapshot Usage")); + responseList.add(new UsageTypeResponse(SECURITY_GROUP, "SECURITY_GROUP", "Security Group Usage")); + responseList.add(new UsageTypeResponse(LOAD_BALANCER_POLICY, "LOAD_BALANCER_POLICY", "Load Balancer Usage")); + responseList.add(new UsageTypeResponse(PORT_FORWARDING_RULE, "PORT_FORWARDING_RULE", "Port Forwarding Usage")); + responseList.add(new UsageTypeResponse(NETWORK_OFFERING, "NETWORK_OFFERING", "Network 
Offering Usage")); + responseList.add(new UsageTypeResponse(VPN_USERS, "VPN_USERS", "VPN users usage")); + responseList.add(new UsageTypeResponse(VM_DISK_IO_READ, "VM_DISK_IO_READ", "VM Disk usage(I/O Read)")); + responseList.add(new UsageTypeResponse(VM_DISK_IO_WRITE, "VM_DISK_IO_WRITE", "VM Disk usage(I/O Write)")); + responseList.add(new UsageTypeResponse(VM_DISK_BYTES_READ, "VM_DISK_BYTES_READ", "VM Disk usage(Bytes Read)")); + responseList.add(new UsageTypeResponse(VM_DISK_BYTES_WRITE, "VM_DISK_BYTES_WRITE", "VM Disk usage(Bytes Write)")); + responseList.add(new UsageTypeResponse(VM_SNAPSHOT, "VM_SNAPSHOT", "VM Snapshot storage usage")); + responseList.add(new UsageTypeResponse(VOLUME_SECONDARY, "VOLUME_SECONDARY", "Volume on secondary storage usage")); + responseList.add(new UsageTypeResponse(VM_SNAPSHOT_ON_PRIMARY, "VM_SNAPSHOT_ON_PRIMARY", "VM Snapshot on primary storage usage")); + responseList.add(new UsageTypeResponse(BACKUP, "BACKUP", "Backup storage usage")); + responseList.add(new UsageTypeResponse(BUCKET, "BUCKET", "Bucket storage usage")); + responseList.add(new UsageTypeResponse(NETWORK, "NETWORK", "Network usage")); + responseList.add(new UsageTypeResponse(VPC, "VPC", "VPC usage")); return responseList; } } diff --git a/api/src/main/java/org/apache/cloudstack/user/ResourceReservation.java b/api/src/main/java/org/apache/cloudstack/user/ResourceReservation.java index fb4fe121cc7..d49120d4491 100644 --- a/api/src/main/java/org/apache/cloudstack/user/ResourceReservation.java +++ b/api/src/main/java/org/apache/cloudstack/user/ResourceReservation.java @@ -22,6 +22,8 @@ import org.apache.cloudstack.api.InternalIdentity; import com.cloud.configuration.Resource; +import java.util.Date; + /** * an interface defining an {code}AutoClosable{code} reservation object */ @@ -39,4 +41,6 @@ ResourceReservation extends InternalIdentity { String getTag(); Long getReservedAmount(); + + Date getCreated(); } diff --git 
a/api/src/main/java/org/apache/cloudstack/userdata/UserDataManager.java b/api/src/main/java/org/apache/cloudstack/userdata/UserDataManager.java index 4dfcd0a7de1..b4bede24890 100644 --- a/api/src/main/java/org/apache/cloudstack/userdata/UserDataManager.java +++ b/api/src/main/java/org/apache/cloudstack/userdata/UserDataManager.java @@ -17,11 +17,16 @@ package org.apache.cloudstack.userdata; import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import com.cloud.utils.component.Manager; public interface UserDataManager extends Manager, Configurable { + String VM_USERDATA_MAX_LENGTH_STRING = "vm.userdata.max.length"; + ConfigKey VM_USERDATA_MAX_LENGTH = new ConfigKey<>("Advanced", Integer.class, VM_USERDATA_MAX_LENGTH_STRING, "32768", + "Max length of vm userdata after base64 encoding. Default is 32768 and maximum is 1048576", true); + String concatenateUserData(String userdata1, String userdata2, String userdataProvider); String validateUserData(String userData, BaseCmd.HTTPMethod httpmethod); } diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java index 23e0e371714..5697a040b81 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedInstanceTO.java @@ -17,6 +17,8 @@ package org.apache.cloudstack.vm; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.util.List; public class UnmanagedInstanceTO { @@ -317,6 +319,16 @@ public class UnmanagedInstanceTO { public int getDatastorePort() { return datastorePort; } + + @Override + public String toString() { + return "Disk {" + + "diskId='" + diskId + '\'' + + ", capacity=" + toHumanReadableSize(capacity) + + ", controller='" + controller + '\'' + + ", controllerUnit=" + controllerUnit + + "}"; + } } public static class 
Nic { @@ -409,5 +421,14 @@ public class UnmanagedInstanceTO { public void setPciSlot(String pciSlot) { this.pciSlot = pciSlot; } + + @Override + public String toString() { + return "Nic{" + + "nicId='" + nicId + '\'' + + ", adapterType='" + adapterType + '\'' + + ", macAddress='" + macAddress + '\'' + + "}"; + } } } diff --git a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java index 53aece94964..b6233b9c270 100644 --- a/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java +++ b/api/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManager.java @@ -30,6 +30,46 @@ public interface UnmanagedVMsManager extends VmImportService, UnmanageVMService, "If set to true, do not remove VM nics (and its MAC addresses) when unmanaging a VM, leaving them allocated but not reserved. " + "If set to false, nics are removed and MAC addresses can be reassigned", true, ConfigKey.Scope.Zone); + ConfigKey RemoteKvmInstanceDisksCopyTimeout = new ConfigKey<>(Integer.class, + "remote.kvm.instance.disks.copy.timeout", + "Advanced", + "30", + "Timeout (in mins) to prepare and copy the disks of remote KVM instance while importing the instance from an external host", + true, + ConfigKey.Scope.Global, + null); + + ConfigKey ConvertVmwareInstanceToKvmTimeout = new ConfigKey<>(Integer.class, + "convert.vmware.instance.to.kvm.timeout", + "Advanced", + "3", + "Timeout (in hours) for the instance conversion process from VMware through the virt-v2v binary on a KVM host", + true, + ConfigKey.Scope.Global, + null); + + ConfigKey ThreadsOnMSToImportVMwareVMFiles = new ConfigKey<>(Integer.class, + "threads.on.ms.to.import.vmware.vm.files", + "Advanced", + "0", + "Threads to use on the management server when importing VM files from VMWare." + + " -1 or 1 disables threads, 0 uses a thread per VM disk (disabled for single disk) and >1 for manual thread configuration." 
+ + " Max number is 10, Default is 0.", + true, + ConfigKey.Scope.Global, + null); + + ConfigKey ThreadsOnKVMHostToImportVMwareVMFiles = new ConfigKey<>(Integer.class, + "threads.on.kvm.host.to.import.vmware.vm.files", + "Advanced", + "0", + "Threads to use on the KVM host (by the ovftool, if the version is 4.4.0+) when importing VM files from VMWare." + + " -1 or 1 disables threads, 0 uses a thread per VM disk (disabled for single disk) and >1 for manual thread configuration." + + " Max number is 10, Default is 0.", + true, + ConfigKey.Scope.Global, + null); + static boolean isSupported(Hypervisor.HypervisorType hypervisorType) { return hypervisorType == VMware || hypervisorType == KVM; } diff --git a/api/src/main/resources/META-INF/cloudstack/api-config/spring-api-config-context.xml b/api/src/main/resources/META-INF/cloudstack/api-config/spring-api-config-context.xml index 5ea32464ee9..12d3c2361ac 100644 --- a/api/src/main/resources/META-INF/cloudstack/api-config/spring-api-config-context.xml +++ b/api/src/main/resources/META-INF/cloudstack/api-config/spring-api-config-context.xml @@ -28,5 +28,5 @@ > - + diff --git a/api/src/main/resources/META-INF/cloudstack/api-planner/spring-api-planner-context.xml b/api/src/main/resources/META-INF/cloudstack/api-planner/spring-api-planner-context.xml index 8523daf1d21..b614e362950 100644 --- a/api/src/main/resources/META-INF/cloudstack/api-planner/spring-api-planner-context.xml +++ b/api/src/main/resources/META-INF/cloudstack/api-planner/spring-api-planner-context.xml @@ -30,5 +30,5 @@ - + diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/CreateASNRangeCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/CreateASNRangeCmdTest.java new file mode 100644 index 00000000000..603cda2040d --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/CreateASNRangeCmdTest.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or 
more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.bgp; + +import com.cloud.bgp.ASNumberRange; +import com.cloud.bgp.BGPService; + +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.response.ASNRangeResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class CreateASNRangeCmdTest { + + BGPService bgpService = Mockito.spy(BGPService.class); + ResponseGenerator _responseGenerator = Mockito.spy(ResponseGenerator.class); + + @Test + public void testCreateASNRangeCmd() { + Long zoneId = 1L; + Long startASNumber = 110000L; + Long endASNumber = 120000L; + + CreateASNRangeCmd cmd = new CreateASNRangeCmd(); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd, "startASNumber", startASNumber); + ReflectionTestUtils.setField(cmd, "endASNumber", endASNumber); + ReflectionTestUtils.setField(cmd,"bgpService", bgpService); + ReflectionTestUtils.setField(cmd,"_responseGenerator", _responseGenerator); + + Assert.assertEquals(zoneId, cmd.getZoneId()); + 
Assert.assertEquals(startASNumber, cmd.getStartASNumber()); + Assert.assertEquals(endASNumber, cmd.getEndASNumber()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + + ASNumberRange asnRange = Mockito.mock(ASNumberRange.class); + Mockito.when(bgpService.createASNumberRange(zoneId, startASNumber, endASNumber)).thenReturn(asnRange); + + ASNRangeResponse response = Mockito.mock(ASNRangeResponse.class); + Mockito.when(_responseGenerator.createASNumberRangeResponse(asnRange)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/DeleteASNRangeCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/DeleteASNRangeCmdTest.java new file mode 100644 index 00000000000..2abcf736c5b --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/DeleteASNRangeCmdTest.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.bgp; + +import com.cloud.bgp.BGPService; + +import org.apache.cloudstack.api.response.SuccessResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteASNRangeCmdTest { + + BGPService bgpService = Mockito.spy(BGPService.class); + + @Test + public void testDeleteASNRangeCmd() { + Long id = 200L; + + DeleteASNRangeCmd cmd = new DeleteASNRangeCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd,"bgpService", bgpService); + + Assert.assertEquals(id, cmd.getId()); + + Mockito.when(bgpService.deleteASRange(id)).thenReturn(true); + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Object response = cmd.getResponseObject(); + Assert.assertTrue(response instanceof SuccessResponse); + + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/ListASNRangesCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/ListASNRangesCmdTest.java new file mode 100644 index 00000000000..7f49c61a693 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/ListASNRangesCmdTest.java @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.bgp; + +import com.cloud.bgp.ASNumberRange; +import com.cloud.bgp.BGPService; + +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.response.ASNRangeResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class ListASNRangesCmdTest { + + BGPService bgpService = Mockito.spy(BGPService.class); + ResponseGenerator _responseGenerator = Mockito.spy(ResponseGenerator.class); + + @Test + public void testListASNRangesCmdTest() { + Long zoneId = 1L; + + ListASNRangesCmd cmd = new ListASNRangesCmd(); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd,"bgpService", bgpService); + ReflectionTestUtils.setField(cmd,"_responseGenerator", _responseGenerator); + + Assert.assertEquals(zoneId, cmd.getZoneId()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + + List ranges = new ArrayList<>(); + ASNumberRange asnRange = Mockito.mock(ASNumberRange.class); + ranges.add(asnRange); + + ASNRangeResponse asnRangeResponse = Mockito.mock(ASNRangeResponse.class); + Mockito.when(_responseGenerator.createASNumberRangeResponse(asnRange)).thenReturn(asnRangeResponse); + + 
Mockito.when(bgpService.listASNumberRanges(zoneId)).thenReturn(ranges); + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Object response = cmd.getResponseObject(); + Assert.assertTrue(response instanceof ListResponse); + ListResponse listResponse = (ListResponse) response; + Assert.assertEquals(1L, (long) listResponse.getCount()); + Assert.assertTrue(listResponse.getResponses().get(0) instanceof ASNRangeResponse); + ASNRangeResponse firstResponse = (ASNRangeResponse) listResponse.getResponses().get(0); + Assert.assertEquals(asnRangeResponse, firstResponse); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/ReleaseASNumberCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/ReleaseASNumberCmdTest.java new file mode 100644 index 00000000000..1b80e5bff7f --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/bgp/ReleaseASNumberCmdTest.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.bgp; + +import com.cloud.bgp.BGPService; +import com.cloud.utils.Pair; + +import org.apache.cloudstack.api.response.SuccessResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class ReleaseASNumberCmdTest { + + BGPService bgpService = Mockito.spy(BGPService.class); + + @Test + public void testReleaseASNumberCmd() { + Long zoneId = 1L; + Long asNumber = 10000L; + + ReleaseASNumberCmd cmd = new ReleaseASNumberCmd(); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd, "asNumber", asNumber); + ReflectionTestUtils.setField(cmd,"bgpService", bgpService); + + Assert.assertEquals(zoneId, cmd.getZoneId()); + Assert.assertEquals(asNumber, cmd.getAsNumber()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + + Pair resultPair = Mockito.mock(Pair.class); + Mockito.when(resultPair.first()).thenReturn(true); + Mockito.when(bgpService.releaseASNumber(zoneId, asNumber, false)).thenReturn(resultPair); + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Object response = cmd.getResponseObject(); + Assert.assertTrue(response instanceof SuccessResponse); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForGuestNetworkCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForGuestNetworkCmdTest.java new file mode 100644 index 00000000000..e1393e31699 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForGuestNetworkCmdTest.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network; + +import com.cloud.event.EventTypes; + +import org.apache.cloudstack.api.response.Ipv4SubnetForGuestNetworkResponse; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class CreateIpv4SubnetForGuestNetworkCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testCreateIpv4SubnetForGuestNetworkCmd() { + Long parentId = 1L; + String subnet = "192.168.1.0/24"; + Integer cidrSize = 26; + + CreateIpv4SubnetForGuestNetworkCmd cmd = new CreateIpv4SubnetForGuestNetworkCmd(); + ReflectionTestUtils.setField(cmd, "parentId", parentId); + ReflectionTestUtils.setField(cmd, "subnet", subnet); + ReflectionTestUtils.setField(cmd, "cidrSize", cidrSize); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(parentId, cmd.getParentId()); + Assert.assertEquals(subnet, cmd.getSubnet()); + 
Assert.assertEquals(cidrSize, cmd.getCidrSize()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_IP4_GUEST_SUBNET_CREATE, cmd.getEventType()); + Assert.assertEquals(String.format("Creating guest IPv4 subnet %s in zone subnet=%s", subnet, parentId), cmd.getEventDescription()); + + Ipv4GuestSubnetNetworkMap ipv4GuestSubnetNetworkMap = Mockito.mock(Ipv4GuestSubnetNetworkMap.class); + Mockito.when(routedIpv4Manager.createIpv4SubnetForGuestNetwork(cmd)).thenReturn(ipv4GuestSubnetNetworkMap); + + Ipv4SubnetForGuestNetworkResponse response = Mockito.mock(Ipv4SubnetForGuestNetworkResponse.class); + Mockito.when(routedIpv4Manager.createIpv4SubnetForGuestNetworkResponse(ipv4GuestSubnetNetworkMap)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForZoneCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForZoneCmdTest.java new file mode 100644 index 00000000000..51c1eb986c4 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/CreateIpv4SubnetForZoneCmdTest.java @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network; + +import com.cloud.event.EventTypes; + +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class CreateIpv4SubnetForZoneCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testCreateIpv4SubnetForZoneCmd() { + Long zoneId = 1L; + String subnet = "192.168.1.0/24"; + String accountName = "user"; + Long projectId = 10L; + Long domainId = 11L; + + CreateIpv4SubnetForZoneCmd cmd = new CreateIpv4SubnetForZoneCmd(); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd, "subnet", subnet); + ReflectionTestUtils.setField(cmd, "accountName", accountName); + ReflectionTestUtils.setField(cmd,"projectId", projectId); + ReflectionTestUtils.setField(cmd,"domainId", domainId); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(zoneId, cmd.getZoneId()); + Assert.assertEquals(subnet, cmd.getSubnet()); + Assert.assertEquals(accountName, cmd.getAccountName()); + Assert.assertEquals(projectId, cmd.getProjectId()); + Assert.assertEquals(domainId, 
cmd.getDomainId()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_ZONE_IP4_SUBNET_CREATE, cmd.getEventType()); + Assert.assertEquals(String.format("Creating guest IPv4 subnet %s for zone=%s", subnet, zoneId), cmd.getEventDescription()); + + DataCenterIpv4GuestSubnet zoneSubnet = Mockito.mock(DataCenterIpv4GuestSubnet.class); + Mockito.when(routedIpv4Manager.createDataCenterIpv4GuestSubnet(cmd)).thenReturn(zoneSubnet); + + DataCenterIpv4SubnetResponse response = Mockito.mock(DataCenterIpv4SubnetResponse.class); + Mockito.when(routedIpv4Manager.createDataCenterIpv4SubnetResponse(zoneSubnet)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DedicateIpv4SubnetForZoneCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DedicateIpv4SubnetForZoneCmdTest.java new file mode 100644 index 00000000000..7db77098b23 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DedicateIpv4SubnetForZoneCmdTest.java @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network; + +import com.cloud.event.EventTypes; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class DedicateIpv4SubnetForZoneCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testDedicateIpv4SubnetForZoneCmd() { + Long id = 1L; + String accountName = "user"; + Long projectId = 10L; + Long domainId = 11L; + + DedicateIpv4SubnetForZoneCmd cmd = new DedicateIpv4SubnetForZoneCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd, "accountName", accountName); + ReflectionTestUtils.setField(cmd,"projectId", projectId); + ReflectionTestUtils.setField(cmd,"domainId", domainId); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(accountName, cmd.getAccountName()); + Assert.assertEquals(projectId, cmd.getProjectId()); + Assert.assertEquals(domainId, cmd.getDomainId()); + + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_ZONE_IP4_SUBNET_DEDICATE, cmd.getEventType()); + Assert.assertEquals(String.format("Dedicating zone IPv4 subnet %s", id), cmd.getEventDescription()); + + DataCenterIpv4GuestSubnet zoneSubnet = Mockito.mock(DataCenterIpv4GuestSubnet.class); + Mockito.when(routedIpv4Manager.dedicateDataCenterIpv4GuestSubnet(cmd)).thenReturn(zoneSubnet); + + DataCenterIpv4SubnetResponse 
response = Mockito.mock(DataCenterIpv4SubnetResponse.class); + Mockito.when(routedIpv4Manager.createDataCenterIpv4SubnetResponse(zoneSubnet)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForGuestNetworkCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForGuestNetworkCmdTest.java new file mode 100644 index 00000000000..a4af5ddf748 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForGuestNetworkCmdTest.java @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import com.cloud.event.EventTypes; + +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteIpv4SubnetForGuestNetworkCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testDeleteIpv4SubnetForGuestNetworkCmd() { + Long id = 1L; + + DeleteIpv4SubnetForGuestNetworkCmd cmd = new DeleteIpv4SubnetForGuestNetworkCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_IP4_GUEST_SUBNET_DELETE, cmd.getEventType()); + Assert.assertEquals(String.format("Deleting guest IPv4 subnet %s", id), cmd.getEventDescription()); + + Mockito.when(routedIpv4Manager.deleteIpv4SubnetForGuestNetwork(cmd)).thenReturn(true); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertTrue(cmd.getResponseObject() instanceof SuccessResponse); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForZoneCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForZoneCmdTest.java new file mode 100644 index 00000000000..7af173f09d9 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/DeleteIpv4SubnetForZoneCmdTest.java @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network; + +import com.cloud.event.EventTypes; + +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteIpv4SubnetForZoneCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testDeleteIpv4SubnetForZoneCmd() { + Long id = 1L; + + DeleteIpv4SubnetForZoneCmd cmd = new DeleteIpv4SubnetForZoneCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_ZONE_IP4_SUBNET_DELETE, cmd.getEventType()); + Assert.assertEquals(String.format("Deleting zone IPv4 subnet %s", id), cmd.getEventDescription()); + + Mockito.when(routedIpv4Manager.deleteDataCenterIpv4GuestSubnet(cmd)).thenReturn(true); + + try { + cmd.execute(); + } catch (Exception ignored) 
{ + } + + Assert.assertTrue(cmd.getResponseObject() instanceof SuccessResponse); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForGuestNetworkCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForGuestNetworkCmdTest.java new file mode 100644 index 00000000000..cbfe34f774a --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForGuestNetworkCmdTest.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.api.response.Ipv4SubnetForGuestNetworkResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.Arrays; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class ListIpv4SubnetsForGuestNetworkCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testListIpv4SubnetsForGuestNetworkCmd() { + Long id = 1L; + Long zoneId = 2L; + Long parentId = 2L; + String subnet = "192.168.1.0/24"; + Long networkId = 10L; + Long vpcId = 11L; + + ListIpv4SubnetsForGuestNetworkCmd cmd = new ListIpv4SubnetsForGuestNetworkCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd, "subnet", subnet); + ReflectionTestUtils.setField(cmd, "parentId", parentId); + ReflectionTestUtils.setField(cmd,"networkId", networkId); + ReflectionTestUtils.setField(cmd,"vpcId", vpcId); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(zoneId, cmd.getZoneId()); + Assert.assertEquals(subnet, cmd.getSubnet()); + Assert.assertEquals(networkId, cmd.getNetworkId()); + Assert.assertEquals(vpcId, cmd.getVpcId()); + Assert.assertEquals(parentId, cmd.getParentId()); + + Assert.assertEquals(0L, cmd.getEntityOwnerId()); + + Ipv4GuestSubnetNetworkMap ipv4GuestSubnetNetworkMap = Mockito.mock(Ipv4GuestSubnetNetworkMap.class); + List ipv4GuestSubnetNetworkMaps = Arrays.asList(ipv4GuestSubnetNetworkMap); + 
Mockito.when(routedIpv4Manager.listIpv4GuestSubnetsForGuestNetwork(cmd)).thenReturn(ipv4GuestSubnetNetworkMaps); + + Ipv4SubnetForGuestNetworkResponse response = Mockito.mock(Ipv4SubnetForGuestNetworkResponse.class); + Mockito.when(routedIpv4Manager.createIpv4SubnetForGuestNetworkResponse(ipv4GuestSubnetNetworkMap)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertTrue(cmd.getResponseObject() instanceof ListResponse); + ListResponse listResponse = (ListResponse) cmd.getResponseObject(); + Assert.assertEquals(1, (int) listResponse.getCount()); + Assert.assertEquals(response, listResponse.getResponses().get(0)); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForZoneCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForZoneCmdTest.java new file mode 100644 index 00000000000..2c7a246f70f --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ListIpv4SubnetsForZoneCmdTest.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.Arrays; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class ListIpv4SubnetsForZoneCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testListIpv4SubnetsForZoneCmd() { + Long id = 1L; + Long zoneId = 2L; + String subnet = "192.168.1.0/24"; + String accountName = "user"; + Long projectId = 10L; + Long domainId = 11L; + + ListIpv4SubnetsForZoneCmd cmd = new ListIpv4SubnetsForZoneCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd, "subnet", subnet); + ReflectionTestUtils.setField(cmd, "accountName", accountName); + ReflectionTestUtils.setField(cmd,"projectId", projectId); + ReflectionTestUtils.setField(cmd,"domainId", domainId); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(zoneId, cmd.getZoneId()); + Assert.assertEquals(subnet, cmd.getSubnet()); + Assert.assertEquals(accountName, cmd.getAccountName()); + Assert.assertEquals(projectId, cmd.getProjectId()); + Assert.assertEquals(domainId, cmd.getDomainId()); + + Assert.assertEquals(0L, cmd.getEntityOwnerId()); + + DataCenterIpv4GuestSubnet zoneSubnet = Mockito.mock(DataCenterIpv4GuestSubnet.class); + List zoneSubnets = Arrays.asList(zoneSubnet); + 
Mockito.when(routedIpv4Manager.listDataCenterIpv4GuestSubnets(cmd)).thenReturn(zoneSubnets); + + DataCenterIpv4SubnetResponse response = Mockito.mock(DataCenterIpv4SubnetResponse.class); + Mockito.when(routedIpv4Manager.createDataCenterIpv4SubnetResponse(zoneSubnet)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertTrue(cmd.getResponseObject() instanceof ListResponse); + ListResponse listResponse = (ListResponse) cmd.getResponseObject(); + Assert.assertEquals(1, (int) listResponse.getCount()); + Assert.assertEquals(response, listResponse.getResponses().get(0)); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedIpv4SubnetForZoneCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedIpv4SubnetForZoneCmdTest.java new file mode 100644 index 00000000000..9ce9a4f9464 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedIpv4SubnetForZoneCmdTest.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import com.cloud.event.EventTypes; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class ReleaseDedicatedIpv4SubnetForZoneCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testReleaseDedicatedIpv4SubnetForZoneCmd() { + Long id = 1L; + + ReleaseDedicatedIpv4SubnetForZoneCmd cmd = new ReleaseDedicatedIpv4SubnetForZoneCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_ZONE_IP4_SUBNET_RELEASE, cmd.getEventType()); + Assert.assertEquals(String.format("Releasing a dedicated zone IPv4 subnet %s", id), cmd.getEventDescription()); + + DataCenterIpv4GuestSubnet zoneSubnet = Mockito.mock(DataCenterIpv4GuestSubnet.class); + Mockito.when(routedIpv4Manager.releaseDedicatedDataCenterIpv4GuestSubnet(cmd)).thenReturn(zoneSubnet); + + DataCenterIpv4SubnetResponse response = Mockito.mock(DataCenterIpv4SubnetResponse.class); + Mockito.when(routedIpv4Manager.createDataCenterIpv4SubnetResponse(zoneSubnet)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/UpdateIpv4SubnetForZoneCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/UpdateIpv4SubnetForZoneCmdTest.java new file mode 100644 index 00000000000..cdb9cce22d8 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/UpdateIpv4SubnetForZoneCmdTest.java @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network; + +import com.cloud.event.EventTypes; + +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class UpdateIpv4SubnetForZoneCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testUpdateIpv4SubnetForZoneCmd() { + Long id = 1L; + String subnet = "192.168.1.0/24"; + + UpdateIpv4SubnetForZoneCmd cmd = new UpdateIpv4SubnetForZoneCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd, "subnet", subnet); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(subnet, cmd.getSubnet()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_ZONE_IP4_SUBNET_UPDATE, cmd.getEventType()); + Assert.assertEquals(String.format("Updating zone IPv4 subnet %s", id), cmd.getEventDescription()); + + DataCenterIpv4GuestSubnet zoneSubnet = Mockito.mock(DataCenterIpv4GuestSubnet.class); + Mockito.when(routedIpv4Manager.updateDataCenterIpv4GuestSubnet(cmd)).thenReturn(zoneSubnet); + + DataCenterIpv4SubnetResponse response = Mockito.mock(DataCenterIpv4SubnetResponse.class); + Mockito.when(routedIpv4Manager.createDataCenterIpv4SubnetResponse(zoneSubnet)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForNetworkCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForNetworkCmdTest.java new file mode 100644 index 00000000000..28ddad17afe --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForNetworkCmdTest.java @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import com.cloud.event.EventTypes; + +import com.cloud.network.Network; +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.Arrays; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class ChangeBgpPeersForNetworkCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + ResponseGenerator _responseGenerator = Mockito.spy(ResponseGenerator.class); + + @Test + public void testChangeBgpPeersForNetworkCmd() { + Long networkId = 10L; + List bgpPeerIds = Arrays.asList(20L, 21L); + + ChangeBgpPeersForNetworkCmd cmd = new ChangeBgpPeersForNetworkCmd(); + ReflectionTestUtils.setField(cmd, "networkId", networkId); + ReflectionTestUtils.setField(cmd, "bgpPeerIds", bgpPeerIds); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + ReflectionTestUtils.setField(cmd,"_responseGenerator", _responseGenerator); + + Assert.assertEquals(networkId, cmd.getNetworkId()); + Assert.assertEquals(bgpPeerIds, cmd.getBgpPeerIds()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_NETWORK_BGP_PEER_UPDATE, cmd.getEventType()); + Assert.assertEquals(String.format("Changing Bgp Peers for network %s", networkId), cmd.getEventDescription()); + + Network network = Mockito.mock(Network.class); + Mockito.when(routedIpv4Manager.changeBgpPeersForNetwork(cmd)).thenReturn(network); + + NetworkResponse response = Mockito.mock(NetworkResponse.class); + 
Mockito.when(_responseGenerator.createNetworkResponse(ResponseObject.ResponseView.Full, network)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForVpcCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForVpcCmdTest.java new file mode 100644 index 00000000000..96eb1f020de --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ChangeBgpPeersForVpcCmdTest.java @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import com.cloud.event.EventTypes; + +import com.cloud.network.vpc.Vpc; +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.response.VpcResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.Arrays; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class ChangeBgpPeersForVpcCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + ResponseGenerator _responseGenerator = Mockito.spy(ResponseGenerator.class); + + @Test + public void testChangeBgpPeersForVpcCmd() { + Long VpcId = 10L; + List bgpPeerIds = Arrays.asList(20L, 21L); + + ChangeBgpPeersForVpcCmd cmd = new ChangeBgpPeersForVpcCmd(); + ReflectionTestUtils.setField(cmd, "vpcId", VpcId); + ReflectionTestUtils.setField(cmd, "bgpPeerIds", bgpPeerIds); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + ReflectionTestUtils.setField(cmd,"_responseGenerator", _responseGenerator); + + Assert.assertEquals(VpcId, cmd.getVpcId()); + Assert.assertEquals(bgpPeerIds, cmd.getBgpPeerIds()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_VPC_BGP_PEER_UPDATE, cmd.getEventType()); + Assert.assertEquals(String.format("Changing Bgp Peers for VPC %s", VpcId), cmd.getEventDescription()); + + Vpc Vpc = Mockito.mock(Vpc.class); + Mockito.when(routedIpv4Manager.changeBgpPeersForVpc(cmd)).thenReturn(Vpc); + + VpcResponse response = Mockito.mock(VpcResponse.class); + Mockito.when(_responseGenerator.createVpcResponse(ResponseObject.ResponseView.Full, Vpc)).thenReturn(response); + + try { + cmd.execute(); + } catch 
(Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/CreateBgpPeerCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/CreateBgpPeerCmdTest.java new file mode 100644 index 00000000000..0d802bf3619 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/CreateBgpPeerCmdTest.java @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import com.cloud.event.EventTypes; + +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class CreateBgpPeerCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testCreateBgpPeerCmd() { + Long zoneId = 1L; + String accountName = "user"; + Long projectId = 10L; + Long domainId = 11L; + String ip4Address = "ip4-address"; + String ip6Address = "ip6-address"; + Long peerAsNumber = 15000L; + String peerPassword = "peer-password"; + + CreateBgpPeerCmd cmd = new CreateBgpPeerCmd(); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd, "ip4Address", ip4Address); + ReflectionTestUtils.setField(cmd, "ip6Address", ip6Address); + ReflectionTestUtils.setField(cmd, "asNumber", peerAsNumber); + ReflectionTestUtils.setField(cmd, "password", peerPassword); + ReflectionTestUtils.setField(cmd, "accountName", accountName); + ReflectionTestUtils.setField(cmd,"projectId", projectId); + ReflectionTestUtils.setField(cmd,"domainId", domainId); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(zoneId, cmd.getZoneId()); + Assert.assertEquals(ip4Address, cmd.getIp4Address()); + Assert.assertEquals(ip6Address, cmd.getIp6Address()); + Assert.assertEquals(peerAsNumber, cmd.getAsNumber()); + Assert.assertEquals(peerPassword, cmd.getPassword()); + Assert.assertEquals(accountName, cmd.getAccountName()); + Assert.assertEquals(projectId, cmd.getProjectId()); + Assert.assertEquals(domainId, cmd.getDomainId()); + + 
Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_BGP_PEER_CREATE, cmd.getEventType()); + Assert.assertEquals(String.format("Creating Bgp Peer %s for zone=%s", peerAsNumber, zoneId), cmd.getEventDescription()); + + BgpPeer bgpPeer = Mockito.mock(BgpPeer.class); + Mockito.when(routedIpv4Manager.createBgpPeer(cmd)).thenReturn(bgpPeer); + + BgpPeerResponse response = Mockito.mock(BgpPeerResponse.class); + Mockito.when(routedIpv4Manager.createBgpPeerResponse(bgpPeer)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/DedicateBgpPeerCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/DedicateBgpPeerCmdTest.java new file mode 100644 index 00000000000..f3ae007da28 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/DedicateBgpPeerCmdTest.java @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import com.cloud.event.EventTypes; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class DedicateBgpPeerCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testDedicateBgpPeerCmd() { + Long id = 1L; + String accountName = "user"; + Long projectId = 10L; + Long domainId = 11L; + + DedicateBgpPeerCmd cmd = new DedicateBgpPeerCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd, "accountName", accountName); + ReflectionTestUtils.setField(cmd,"projectId", projectId); + ReflectionTestUtils.setField(cmd,"domainId", domainId); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(accountName, cmd.getAccountName()); + Assert.assertEquals(projectId, cmd.getProjectId()); + Assert.assertEquals(domainId, cmd.getDomainId()); + + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_BGP_PEER_DEDICATE, cmd.getEventType()); + Assert.assertEquals(String.format("Dedicating Bgp Peer %s", id), cmd.getEventDescription()); + + BgpPeer bgpPeer = Mockito.mock(BgpPeer.class); + Mockito.when(routedIpv4Manager.dedicateBgpPeer(cmd)).thenReturn(bgpPeer); + + BgpPeerResponse response = Mockito.mock(BgpPeerResponse.class); + Mockito.when(routedIpv4Manager.createBgpPeerResponse(bgpPeer)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff 
--git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/DeleteBgpPeerCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/DeleteBgpPeerCmdTest.java new file mode 100644 index 00000000000..5e747188fda --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/DeleteBgpPeerCmdTest.java @@ -0,0 +1,58 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import com.cloud.event.EventTypes; + +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteBgpPeerCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testDeleteBgpPeerCmd() { + Long id = 1L; + + DeleteBgpPeerCmd cmd = new DeleteBgpPeerCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_BGP_PEER_DELETE, cmd.getEventType()); + Assert.assertEquals(String.format("Deleting Bgp Peer %s", id), cmd.getEventDescription()); + + Mockito.when(routedIpv4Manager.deleteBgpPeer(cmd)).thenReturn(true); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertTrue(cmd.getResponseObject() instanceof SuccessResponse); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ListBgpPeersCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ListBgpPeersCmdTest.java new file mode 100644 index 00000000000..cb2027951ad --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ListBgpPeersCmdTest.java @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network.bgp; + +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.Arrays; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class ListBgpPeersCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testIsDedicated() { + ListBgpPeersCmd cmd = new ListBgpPeersCmd(); + + Assert.assertNull(cmd.getDedicated()); + + ReflectionTestUtils.setField(cmd, "isDedicated", Boolean.TRUE); + Assert.assertTrue(cmd.getDedicated()); + + ReflectionTestUtils.setField(cmd, "isDedicated", Boolean.FALSE); + Assert.assertFalse(cmd.getDedicated()); + } + + @Test + public void testListBgpPeersCmd() { + Long id = 1L; + Long zoneId = 2L; + Long peerAsNumber = 15000L; + String accountName = "user"; + Long projectId = 10L; + Long domainId = 11L; + + ListBgpPeersCmd cmd = new ListBgpPeersCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd, "zoneId", 
zoneId); + ReflectionTestUtils.setField(cmd, "asNumber", peerAsNumber); + ReflectionTestUtils.setField(cmd, "accountName", accountName); + ReflectionTestUtils.setField(cmd,"projectId", projectId); + ReflectionTestUtils.setField(cmd,"domainId", domainId); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(zoneId, cmd.getZoneId()); + Assert.assertEquals(peerAsNumber, cmd.getAsNumber()); + Assert.assertEquals(accountName, cmd.getAccountName()); + Assert.assertEquals(projectId, cmd.getProjectId()); + Assert.assertEquals(domainId, cmd.getDomainId()); + + Assert.assertEquals(0L, cmd.getEntityOwnerId()); + + BgpPeer bgpPeer = Mockito.mock(BgpPeer.class); + List bgpPeers = Arrays.asList(bgpPeer); + Mockito.when(routedIpv4Manager.listBgpPeers(cmd)).thenReturn(bgpPeers); + + BgpPeerResponse response = Mockito.mock(BgpPeerResponse.class); + Mockito.when(routedIpv4Manager.createBgpPeerResponse(bgpPeer)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertTrue(cmd.getResponseObject() instanceof ListResponse); + ListResponse listResponse = (ListResponse) cmd.getResponseObject(); + Assert.assertEquals(1, (int) listResponse.getCount()); + Assert.assertEquals(response, listResponse.getResponses().get(0)); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ReleaseDedicatedBgpPeerCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ReleaseDedicatedBgpPeerCmdTest.java new file mode 100644 index 00000000000..8c55c4a7347 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/ReleaseDedicatedBgpPeerCmdTest.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.network.bgp; + +import com.cloud.event.EventTypes; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class ReleaseDedicatedBgpPeerCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testReleaseDedicatedBgpPeerCmd() { + Long id = 1L; + + ReleaseDedicatedBgpPeerCmd cmd = new ReleaseDedicatedBgpPeerCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_BGP_PEER_RELEASE, cmd.getEventType()); + Assert.assertEquals(String.format("Releasing a dedicated Bgp Peer %s", id), cmd.getEventDescription()); + + BgpPeer bgpPeer = Mockito.mock(BgpPeer.class); + Mockito.when(routedIpv4Manager.releaseDedicatedBgpPeer(cmd)).thenReturn(bgpPeer); + + BgpPeerResponse response = Mockito.mock(BgpPeerResponse.class); + 
Mockito.when(routedIpv4Manager.createBgpPeerResponse(bgpPeer)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/UpdateBgpPeerCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/UpdateBgpPeerCmdTest.java new file mode 100644 index 00000000000..003944c6147 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/network/bgp/UpdateBgpPeerCmdTest.java @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.network.bgp; + +import com.cloud.event.EventTypes; + +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class UpdateBgpPeerCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + @Test + public void testUpdateBgpPeerCmd() { + Long id = 1L; + String ip4Address = "ip4-address"; + String ip6Address = "ip6-address"; + Long peerAsNumber = 15000L; + String peerPassword = "peer-password"; + + UpdateBgpPeerCmd cmd = new UpdateBgpPeerCmd(); + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd, "ip4Address", ip4Address); + ReflectionTestUtils.setField(cmd, "ip6Address", ip6Address); + ReflectionTestUtils.setField(cmd, "asNumber", peerAsNumber); + ReflectionTestUtils.setField(cmd, "password", peerPassword); + ReflectionTestUtils.setField(cmd,"routedIpv4Manager", routedIpv4Manager); + + Assert.assertEquals(id, cmd.getId()); + Assert.assertEquals(ip4Address, cmd.getIp4Address()); + Assert.assertEquals(ip6Address, cmd.getIp6Address()); + Assert.assertEquals(peerAsNumber, cmd.getAsNumber()); + Assert.assertEquals(peerPassword, cmd.getPassword()); + Assert.assertEquals(1L, cmd.getEntityOwnerId()); + Assert.assertEquals(EventTypes.EVENT_BGP_PEER_UPDATE, cmd.getEventType()); + Assert.assertEquals(String.format("Updating Bgp Peer %s", id), cmd.getEventDescription()); + + BgpPeer bgpPeer = Mockito.mock(BgpPeer.class); + Mockito.when(routedIpv4Manager.updateBgpPeer(cmd)).thenReturn(bgpPeer); + + BgpPeerResponse response = Mockito.mock(BgpPeerResponse.class); + 
Mockito.when(routedIpv4Manager.createBgpPeerResponse(bgpPeer)).thenReturn(response); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(response, cmd.getResponseObject()); + } + + @Test + public void testUpdateBgpPeerCleanupDetails() { + UpdateBgpPeerCmd cmd = new UpdateBgpPeerCmd(); + Assert.assertFalse(cmd.isCleanupDetails()); + + ReflectionTestUtils.setField(cmd, "cleanupDetails", Boolean.TRUE); + Assert.assertTrue(cmd.isCleanupDetails()); + + ReflectionTestUtils.setField(cmd, "cleanupDetails", Boolean.FALSE); + Assert.assertFalse(cmd.isCleanupDetails()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmdTest.java index f69e8cea4f3..6daa5de07cb 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmdTest.java @@ -37,4 +37,22 @@ public class CreateServiceOfferingCmdTest { Assert.assertEquals(createServiceOfferingCmd.getDisplayText(), netName); } + @Test + public void testIsPurgeResourcesNoOrNullValue() { + Assert.assertFalse(createServiceOfferingCmd.isPurgeResources()); + ReflectionTestUtils.setField(createServiceOfferingCmd, "purgeResources", false); + Assert.assertFalse(createServiceOfferingCmd.isPurgeResources()); + } + + @Test + public void testIsPurgeResourcesFalse() { + ReflectionTestUtils.setField(createServiceOfferingCmd, "purgeResources", false); + Assert.assertFalse(createServiceOfferingCmd.isPurgeResources()); + } + + @Test + public void testIsPurgeResourcesTrue() { + ReflectionTestUtils.setField(createServiceOfferingCmd, "purgeResources", true); + Assert.assertTrue(createServiceOfferingCmd.isPurgeResources()); + } } diff --git 
a/api/src/test/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmdTest.java new file mode 100644 index 00000000000..1bb2be041e1 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmdTest.java @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.admin.offering; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class UpdateServiceOfferingCmdTest { + + @InjectMocks + private UpdateServiceOfferingCmd updateServiceOfferingCmd; + + @Test + public void testIsPurgeResourcesNoOrNullValue() { + Assert.assertFalse(updateServiceOfferingCmd.isPurgeResources()); + ReflectionTestUtils.setField(updateServiceOfferingCmd, "purgeResources", false); + Assert.assertFalse(updateServiceOfferingCmd.isPurgeResources()); + } + + @Test + public void testIsPurgeResourcesFalse() { + ReflectionTestUtils.setField(updateServiceOfferingCmd, "purgeResources", false); + Assert.assertFalse(updateServiceOfferingCmd.isPurgeResources()); + } + + @Test + public void testIsPurgeResourcesTrue() { + ReflectionTestUtils.setField(updateServiceOfferingCmd, "purgeResources", true); + Assert.assertTrue(updateServiceOfferingCmd.isPurgeResources()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmdTest.java new file mode 100644 index 00000000000..a628f13275c --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/resource/PurgeExpungedResourcesCmdTest.java @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.resource; + +import static org.junit.Assert.assertNull; + +import java.util.Date; + +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.PurgeExpungedResourcesResponse; +import org.apache.cloudstack.resource.ResourceCleanupService; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(MockitoJUnitRunner.class) +public class PurgeExpungedResourcesCmdTest { + @Mock + ResourceCleanupService resourceCleanupService; + + @Spy + @InjectMocks + PurgeExpungedResourcesCmd spyCmd = Mockito.spy(new PurgeExpungedResourcesCmd()); + + @Test + public void testGetResourceType() { + PurgeExpungedResourcesCmd cmd = new PurgeExpungedResourcesCmd(); + assertNull(cmd.getResourceType()); + ReflectionTestUtils.setField(cmd, "resourceType", ResourceCleanupService.ResourceType.VirtualMachine.toString()); + Assert.assertEquals(ResourceCleanupService.ResourceType.VirtualMachine.toString(), cmd.getResourceType()); + } + + @Test + public void testGetBatchSize() { + PurgeExpungedResourcesCmd cmd = new PurgeExpungedResourcesCmd(); + assertNull(cmd.getBatchSize()); + Long batchSize = 100L; + ReflectionTestUtils.setField(cmd, "batchSize", 
batchSize); + Assert.assertEquals(batchSize, cmd.getBatchSize()); + } + + @Test + public void testGetStartDate() { + PurgeExpungedResourcesCmd cmd = new PurgeExpungedResourcesCmd(); + assertNull(cmd.getStartDate()); + Date date = new Date(); + ReflectionTestUtils.setField(cmd, "startDate", date); + Assert.assertEquals(date, cmd.getStartDate()); + } + + @Test + public void testGetEndDate() { + PurgeExpungedResourcesCmd cmd = new PurgeExpungedResourcesCmd(); + assertNull(cmd.getEndDate()); + Date date = new Date(); + ReflectionTestUtils.setField(cmd, "endDate", date); + Assert.assertEquals(date, cmd.getEndDate()); + } + + @Test + public void testExecute() { + final PurgeExpungedResourcesResponse[] executeResponse = new PurgeExpungedResourcesResponse[1]; + Long result = 100L; + Mockito.when(resourceCleanupService.purgeExpungedResources(Mockito.any())).thenReturn(result); + Mockito.doAnswer((Answer) invocation -> { + executeResponse[0] = (PurgeExpungedResourcesResponse)invocation.getArguments()[0]; + return null; + }).when(spyCmd).setResponseObject(Mockito.any()); + spyCmd.execute(); + PurgeExpungedResourcesResponse response = executeResponse[0]; + Assert.assertNotNull(response); + Assert.assertEquals(result, response.getResourceCount()); + } + + @Test(expected = ServerApiException.class) + public void testExecuteException() { + Mockito.doThrow(CloudRuntimeException.class).when(resourceCleanupService).purgeExpungedResources(Mockito.any()); + spyCmd.execute(); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCCmdByAdminTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCCmdByAdminTest.java new file mode 100644 index 00000000000..c4e21bb948b --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCCmdByAdminTest.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.vpc; + +import com.cloud.network.vpc.VpcService; +import com.cloud.user.AccountService; +import com.cloud.utils.db.EntityManager; +import junit.framework.TestCase; +import org.apache.cloudstack.api.ResponseGenerator; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class CreateVPCCmdByAdminTest extends TestCase { + + @Mock + public VpcService _vpcService; + @Mock + public EntityManager _entityMgr; + @Mock + public AccountService _accountService; + private ResponseGenerator responseGenerator; + @InjectMocks + CreateVPCCmdByAdmin cmd = new CreateVPCCmdByAdmin(); + + @Test + public void testBgpPeerIds() { + List bgpPeerIds = Mockito.mock(List.class); + ReflectionTestUtils.setField(cmd, "bgpPeerIds", bgpPeerIds); + Assert.assertEquals(bgpPeerIds, cmd.getBgpPeerIds()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/test/CreateRoleCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/test/CreateRoleCmdTest.java index 4b9d4fd8974..72ce9593364 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/test/CreateRoleCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/test/CreateRoleCmdTest.java @@ -54,6 +54,7 @@ public class CreateRoleCmdTest { when(role.getDescription()).thenReturn("User test"); when(role.getName()).thenReturn("testuser"); when(role.getRoleType()).thenReturn(RoleType.User); + when(role.getState()).thenReturn(Role.State.ENABLED); when(roleService.createRole(createRoleCmd.getRoleName(), createRoleCmd.getRoleType(), createRoleCmd.getRoleDescription(), true)).thenReturn(role); createRoleCmd.execute(); RoleResponse response = (RoleResponse) createRoleCmd.getResponseObject(); @@ -71,6 +72,7 @@ public class CreateRoleCmdTest { when(newRole.getDescription()).thenReturn("User test"); when(newRole.getName()).thenReturn("testuser"); when(newRole.getRoleType()).thenReturn(RoleType.User); + when(newRole.getState()).thenReturn(Role.State.ENABLED); when(roleService.createRole(createRoleCmd.getRoleName(), role, createRoleCmd.getRoleDescription(), true)).thenReturn(newRole); createRoleCmd.execute(); RoleResponse response = (RoleResponse) createRoleCmd.getResponseObject(); diff --git a/api/src/test/java/org/apache/cloudstack/api/command/test/ImportRoleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/test/ImportRoleCmdTest.java index 6299c1ed8e2..d2597e5162f 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/test/ImportRoleCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/test/ImportRoleCmdTest.java @@ -32,19 +32,13 @@ import org.springframework.test.util.ReflectionTestUtils; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyCollection; -import static org.mockito.ArgumentMatchers.anyList; import static 
org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import com.cloud.exception.InvalidParameterValueException; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.regex.Matcher; public class ImportRoleCmdTest { private ImportRoleCmd importRoleCmd; @@ -93,6 +87,7 @@ public class ImportRoleCmdTest { when(role.getDescription()).thenReturn("test user imported"); when(role.getName()).thenReturn("Test User"); when(role.getRoleType()).thenReturn(RoleType.User); + when(role.getState()).thenReturn(Role.State.ENABLED); when(roleService.importRole(anyString(), any(), anyString(), any(), anyBoolean(), anyBoolean())).thenReturn(role); importRoleCmd.execute(); diff --git a/api/src/test/java/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java index 84b91525742..9a1dae9a480 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/test/UpdateRoleCmdTest.java @@ -62,6 +62,7 @@ public class UpdateRoleCmdTest extends TestCase{ when(role.getId()).thenReturn(1L); when(role.getDescription()).thenReturn("Description Initial"); when(role.getName()).thenReturn("User"); + when(role.getState()).thenReturn(Role.State.ENABLED); updateRoleCmd.execute(); RoleResponse response = (RoleResponse) updateRoleCmd.getResponseObject(); assertEquals((String)ReflectionTestUtils.getField(response, "roleName"),role.getName()); diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/bgp/ListASNumbersCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/bgp/ListASNumbersCmdTest.java new file mode 100644 index 00000000000..9d7f4ef7cf1 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/bgp/ListASNumbersCmdTest.java @@ -0,0 +1,97 @@ +// 
Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.user.bgp; + +import com.cloud.bgp.ASNumber; +import com.cloud.bgp.BGPService; + +import com.cloud.utils.Pair; +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.response.ASNumberResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class ListASNumbersCmdTest { + + BGPService bgpService = Mockito.spy(BGPService.class); + ResponseGenerator _responseGenerator = Mockito.spy(ResponseGenerator.class); + + @Test + public void testListASNumbersCmdTest() { + Long zoneId = 1L; + Long asNumberRangeId = 2L; + Integer asNumber = 10; + Long networkId = 11L; + Long vpcId = 12L; + String account = "account"; + Long domainId = 13L; + + ListASNumbersCmd cmd = new ListASNumbersCmd(); + ReflectionTestUtils.setField(cmd, "zoneId", zoneId); + ReflectionTestUtils.setField(cmd, 
"asNumberRangeId", asNumberRangeId); + ReflectionTestUtils.setField(cmd, "asNumber", asNumber); + ReflectionTestUtils.setField(cmd, "networkId", networkId); + ReflectionTestUtils.setField(cmd, "vpcId", vpcId); + ReflectionTestUtils.setField(cmd, "account", account); + ReflectionTestUtils.setField(cmd, "domainId", domainId); + ReflectionTestUtils.setField(cmd, "allocated", Boolean.TRUE); + + ReflectionTestUtils.setField(cmd,"bgpService", bgpService); + ReflectionTestUtils.setField(cmd,"_responseGenerator", _responseGenerator); + + Assert.assertEquals(zoneId, cmd.getZoneId()); + Assert.assertEquals(asNumberRangeId, cmd.getAsNumberRangeId()); + Assert.assertEquals(asNumber, cmd.getAsNumber()); + Assert.assertEquals(networkId, cmd.getNetworkId()); + Assert.assertEquals(vpcId, cmd.getVpcId()); + Assert.assertEquals(account, cmd.getAccount()); + Assert.assertEquals(domainId, cmd.getDomainId()); + Assert.assertTrue(cmd.getAllocated()); + + List asNumbers = new ArrayList<>(); + ASNumber asn = Mockito.mock(ASNumber.class); + asNumbers.add(asn); + Pair, Integer> pair = new Pair<>(asNumbers, 1); + + ASNumberResponse asNumberResponse = Mockito.mock(ASNumberResponse.class); + Mockito.when(_responseGenerator.createASNumberResponse(asn)).thenReturn(asNumberResponse); + + Mockito.when(bgpService.listASNumbers(cmd)).thenReturn(pair); + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Object response = cmd.getResponseObject(); + Assert.assertTrue(response instanceof ListResponse); + ListResponse listResponse = (ListResponse) response; + Assert.assertEquals(1L, (long) listResponse.getCount()); + Assert.assertTrue(listResponse.getResponses().get(0) instanceof ASNumberResponse); + ASNumberResponse firstResponse = (ASNumberResponse) listResponse.getResponses().get(0); + Assert.assertEquals(asNumberResponse, firstResponse); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmdTest.java new file mode 100644 index 00000000000..11c41f4c92d --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmdTest.java @@ -0,0 +1,251 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.user.network.routing; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.firewall.FirewallService; +import com.cloud.network.rules.FirewallRule; +import com.cloud.utils.net.NetUtils; + +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.response.FirewallResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +@RunWith(MockitoJUnitRunner.class) +public class CreateRoutingFirewallRuleCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + FirewallService _firewallService = Mockito.spy(FirewallService.class); + + ResponseGenerator _responseGenerator = Mockito.spy(ResponseGenerator.class); + + @Test + public void testIsDisplay() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + assertTrue(cmd.isDisplay()); + + ReflectionTestUtils.setField(cmd, "display", Boolean.TRUE); + assertTrue(cmd.isDisplay()); + + ReflectionTestUtils.setField(cmd, "display", Boolean.FALSE); + assertFalse(cmd.isDisplay()); + } + + @Test + public void testGetProtocolValid() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + assertEquals("", cmd.getProtocol()); + + ReflectionTestUtils.setField(cmd, "protocol", "1"); + assertEquals(NetUtils.ICMP_PROTO, cmd.getProtocol()); + + 
ReflectionTestUtils.setField(cmd, "protocol", "icmp"); + assertEquals(NetUtils.ICMP_PROTO, cmd.getProtocol()); + + ReflectionTestUtils.setField(cmd, "protocol", "6"); + assertEquals(NetUtils.TCP_PROTO, cmd.getProtocol()); + + ReflectionTestUtils.setField(cmd, "protocol", "tcp"); + assertEquals(NetUtils.TCP_PROTO, cmd.getProtocol()); + + ReflectionTestUtils.setField(cmd, "protocol", "17"); + assertEquals(NetUtils.UDP_PROTO, cmd.getProtocol()); + + ReflectionTestUtils.setField(cmd, "protocol", "udp"); + assertEquals(NetUtils.UDP_PROTO, cmd.getProtocol()); + } + + @Test(expected = InvalidParameterValueException.class) + public void testGetProtocolInValid() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + + ReflectionTestUtils.setField(cmd, "protocol", "100"); + cmd.getProtocol(); + } + + @Test + public void testGetSourceCidrListNull() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + + List result = cmd.getSourceCidrList(); + assertNotNull(result); + assertEquals(1, result.size()); + assertEquals(NetUtils.ALL_IP4_CIDRS, result.get(0)); + } + + @Test + public void testGetSourceCidrList() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + + List cidrList = Arrays.asList("192.168.0.0/24", "10.0.0.0/8"); + cmd.sourceCidrList = cidrList; + List result = cmd.getSourceCidrList(); + assertNotNull(result); + assertEquals(cidrList, result); + } + + @Test + public void testGetDestinationCidrListNull() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + + List result = cmd.getDestinationCidrList(); + assertNotNull(result); + assertEquals(1, result.size()); + assertEquals(NetUtils.ALL_IP4_CIDRS, result.get(0)); + } + + @Test + public void testGetDestinationCidrList() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + + List cidrList = Arrays.asList("192.168.0.0/24", "10.0.0.0/8"); + cmd.destinationCidrlist = cidrList; + List result = 
cmd.getDestinationCidrList(); + assertNotNull(result); + assertEquals(cidrList, result); + } + + @Test + public void testGetTrafficTypeValid() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + assertEquals(FirewallRule.TrafficType.Ingress, cmd.getTrafficType()); + + ReflectionTestUtils.setField(cmd, "trafficType", "ingress"); + assertEquals(FirewallRule.TrafficType.Ingress, cmd.getTrafficType()); + + ReflectionTestUtils.setField(cmd, "trafficType", "egress"); + assertEquals(FirewallRule.TrafficType.Egress, cmd.getTrafficType()); + } + + @Test(expected = InvalidParameterValueException.class) + public void testGetTrafficTypeInValid() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + + ReflectionTestUtils.setField(cmd, "trafficType", "invalid"); + cmd.getTrafficType(); + } + + @Test + public void testSourcePortStartEnd() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + assertNull(cmd.getSourcePortStart()); + assertNull(cmd.getSourcePortEnd()); + + ReflectionTestUtils.setField(cmd, "publicStartPort", 1111); + assertEquals(1111, (int) cmd.getSourcePortStart()); + assertEquals(1111, (int) cmd.getSourcePortEnd()); + + ReflectionTestUtils.setField(cmd, "publicEndPort", 2222); + assertEquals(1111, (int) cmd.getSourcePortStart()); + assertEquals(2222, (int) cmd.getSourcePortEnd()); + } + + @Test + public void testNetworkId() { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + + ReflectionTestUtils.setField(cmd, "networkId", 1111L); + assertEquals(1111L, (long) cmd.getNetworkId()); + + assertEquals(1111L, (long) cmd.getApiResourceId()); + assertEquals(ApiCommandResourceType.Network, cmd.getApiResourceType()); + assertEquals(EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_CREATE, cmd.getEventType()); + assertEquals("Creating ipv4 firewall rule for routed network", cmd.getEventDescription()); + } + + @Test + public void testIcmpCodeAndType() { + CreateRoutingFirewallRuleCmd 
cmd = new CreateRoutingFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "protocol", "tcp"); + assertNull(cmd.getIcmpType()); + assertNull(cmd.getIcmpCode()); + + ReflectionTestUtils.setField(cmd, "protocol", "icmp"); + assertEquals(-1, (int) cmd.getIcmpType()); + assertEquals(-1, (int) cmd.getIcmpCode()); + + ReflectionTestUtils.setField(cmd, "icmpType", 1111); + ReflectionTestUtils.setField(cmd, "icmpCode", 2222); + assertEquals(1111, (int) cmd.getIcmpType()); + assertEquals(2222, (int) cmd.getIcmpCode()); + } + + @Test + public void testCreate() throws Exception { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "routedIpv4Manager", routedIpv4Manager); + + Long id = 1L; + String uuid = "uuid"; + FirewallRule firewallRule = Mockito.spy(FirewallRule.class); + Mockito.when(firewallRule.getId()).thenReturn(id); + Mockito.when(firewallRule.getUuid()).thenReturn(uuid); + Mockito.when(routedIpv4Manager.createRoutingFirewallRule(cmd)).thenReturn(firewallRule); + + try { + cmd.create(); + } catch (Exception ignored) { + } + + assertEquals(id, cmd.getEntityId()); + assertEquals(uuid, cmd.getEntityUuid()); + } + + @Test + public void testExecute() throws Exception { + CreateRoutingFirewallRuleCmd cmd = new CreateRoutingFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "routedIpv4Manager", routedIpv4Manager); + ReflectionTestUtils.setField(cmd, "_firewallService", _firewallService); + ReflectionTestUtils.setField(cmd, "_responseGenerator", _responseGenerator); + + Long id = 1L; + FirewallRule firewallRule = Mockito.spy(FirewallRule.class); + Mockito.when(firewallRule.getId()).thenReturn(id); + Mockito.when(_firewallService.getFirewallRule(id)).thenReturn(firewallRule); + Mockito.when(routedIpv4Manager.applyRoutingFirewallRule(id)).thenReturn(true); + + FirewallResponse ruleResponse = Mockito.mock(FirewallResponse.class); + 
Mockito.when(_responseGenerator.createFirewallResponse(firewallRule)).thenReturn(ruleResponse); + + try { + ReflectionTestUtils.setField(cmd, "id", id); + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(ruleResponse, cmd.getResponseObject()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/DeleteRoutingFirewallRuleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/DeleteRoutingFirewallRuleCmdTest.java new file mode 100644 index 00000000000..2b55d4c6a58 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/DeleteRoutingFirewallRuleCmdTest.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.user.network.routing; + +import com.cloud.event.EventTypes; +import com.cloud.network.firewall.FirewallService; +import com.cloud.network.rules.FirewallRule; + +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import static org.junit.Assert.assertEquals; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteRoutingFirewallRuleCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + FirewallService _firewallService = Mockito.spy(FirewallService.class); + + @Test + public void testProperties() { + DeleteRoutingFirewallRuleCmd cmd = new DeleteRoutingFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "_firewallService", _firewallService); + + long id = 1L; + long accountId = 2L; + long networkId = 3L; + + FirewallRule firewallRule = Mockito.spy(FirewallRule.class); + Mockito.when(firewallRule.getAccountId()).thenReturn(accountId); + Mockito.when(firewallRule.getNetworkId()).thenReturn(networkId); + Mockito.when(_firewallService.getFirewallRule(id)).thenReturn(firewallRule); + + ReflectionTestUtils.setField(cmd, "id", id); + assertEquals(id, (long) cmd.getId()); + assertEquals(accountId, cmd.getEntityOwnerId()); + assertEquals(networkId, (long) cmd.getApiResourceId()); + assertEquals(ApiCommandResourceType.Network, cmd.getApiResourceType()); + assertEquals(EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_DELETE, cmd.getEventType()); + assertEquals(String.format("Deleting ipv4 routing firewall rule ID=%s", id), cmd.getEventDescription()); + } + + + @Test + public void testExecute() throws Exception { + DeleteRoutingFirewallRuleCmd cmd = new 
DeleteRoutingFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "routedIpv4Manager", routedIpv4Manager); + + Long id = 1L; + Mockito.when(routedIpv4Manager.revokeRoutingFirewallRule(id)).thenReturn(true); + + try { + ReflectionTestUtils.setField(cmd, "id", id); + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertTrue(cmd.getResponseObject() instanceof SuccessResponse); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/ListRoutingFirewallRulesCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/ListRoutingFirewallRulesCmdTest.java new file mode 100644 index 00000000000..53ac45917cb --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/ListRoutingFirewallRulesCmdTest.java @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.api.command.user.network.routing; + +import com.cloud.network.rules.FirewallRule; +import com.cloud.utils.Pair; + +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.response.FirewallResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +@RunWith(MockitoJUnitRunner.class) +public class ListRoutingFirewallRulesCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + ResponseGenerator _responseGenerator = Mockito.spy(ResponseGenerator.class); + + @Test + public void testIsDisplay() { + ListRoutingFirewallRulesCmd cmd = new ListRoutingFirewallRulesCmd(); + assertTrue(cmd.getDisplay()); + + ReflectionTestUtils.setField(cmd, "display", Boolean.TRUE); + assertTrue(cmd.getDisplay()); + + ReflectionTestUtils.setField(cmd, "display", Boolean.FALSE); + assertFalse(cmd.getDisplay()); + } + + @Test + public void testTrafficType() { + ListRoutingFirewallRulesCmd cmd = new ListRoutingFirewallRulesCmd(); + assertNull(cmd.getTrafficType()); + + ReflectionTestUtils.setField(cmd, "trafficType", "Ingress"); + assertEquals(FirewallRule.TrafficType.Ingress, cmd.getTrafficType()); + + ReflectionTestUtils.setField(cmd, "trafficType", "Egress"); + assertEquals(FirewallRule.TrafficType.Egress, cmd.getTrafficType()); + } + + @Test + public void testOtherProperties() { + ListRoutingFirewallRulesCmd cmd = new ListRoutingFirewallRulesCmd(); + + long id = 1L; + long networkId = 
3L; + + ReflectionTestUtils.setField(cmd, "id", id); + ReflectionTestUtils.setField(cmd, "networkId", networkId); + + assertEquals(id, (long) cmd.getId()); + assertEquals(networkId, (long) cmd.getNetworkId()); + assertNull(cmd.getIpAddressId()); + } + + + @Test + public void testExecute() throws Exception { + ListRoutingFirewallRulesCmd cmd = new ListRoutingFirewallRulesCmd(); + ReflectionTestUtils.setField(cmd, "routedIpv4Manager", routedIpv4Manager); + ReflectionTestUtils.setField(cmd, "_responseGenerator", _responseGenerator); + + Long id = 1L; + FirewallRule firewallRule = Mockito.spy(FirewallRule.class); + List firewallRules = Arrays.asList(firewallRule); + Pair, Integer> result = new Pair<>(firewallRules, 1); + + Mockito.when(routedIpv4Manager.listRoutingFirewallRules(cmd)).thenReturn(result); + + FirewallResponse ruleResponse = Mockito.mock(FirewallResponse.class); + Mockito.when(_responseGenerator.createFirewallResponse(firewallRule)).thenReturn(ruleResponse); + + try { + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertTrue(cmd.getResponseObject() instanceof ListResponse); + ListResponse listResponse = (ListResponse) cmd.getResponseObject(); + Assert.assertEquals(1, (int) listResponse.getCount()); + Assert.assertEquals(ruleResponse, listResponse.getResponses().get(0)); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/UpdateRoutingFirewallRuleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/UpdateRoutingFirewallRuleCmdTest.java new file mode 100644 index 00000000000..dd0319df696 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/network/routing/UpdateRoutingFirewallRuleCmdTest.java @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.user.network.routing; + +import com.cloud.event.EventTypes; +import com.cloud.network.firewall.FirewallService; +import com.cloud.network.rules.FirewallRule; + +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.response.FirewallResponse; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +@RunWith(MockitoJUnitRunner.class) +public class UpdateRoutingFirewallRuleCmdTest { + + RoutedIpv4Manager routedIpv4Manager = Mockito.spy(RoutedIpv4Manager.class); + + FirewallService _firewallService = Mockito.spy(FirewallService.class); + + ResponseGenerator _responseGenerator = Mockito.spy(ResponseGenerator.class); + + @Test + public void testIsDisplay() { + UpdateRoutingFirewallRuleCmd cmd = new UpdateRoutingFirewallRuleCmd(); + assertTrue(cmd.isDisplay()); + + ReflectionTestUtils.setField(cmd, "display", Boolean.TRUE); + assertTrue(cmd.isDisplay()); + + ReflectionTestUtils.setField(cmd, "display", 
Boolean.FALSE); + assertFalse(cmd.isDisplay()); + } + + @Test + public void testOtherProperties() { + UpdateRoutingFirewallRuleCmd cmd = new UpdateRoutingFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "_firewallService", _firewallService); + + long id = 1L; + long accountId = 2L; + long networkId = 3L; + + FirewallRule firewallRule = Mockito.spy(FirewallRule.class); + Mockito.when(firewallRule.getAccountId()).thenReturn(accountId); + Mockito.when(firewallRule.getNetworkId()).thenReturn(networkId); + Mockito.when(_firewallService.getFirewallRule(id)).thenReturn(firewallRule); + + ReflectionTestUtils.setField(cmd, "id", id); + assertEquals(id, (long) cmd.getId()); + assertEquals(accountId, cmd.getEntityOwnerId()); + assertEquals(networkId, (long) cmd.getApiResourceId()); + assertEquals(ApiCommandResourceType.Network, cmd.getApiResourceType()); + assertEquals(EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_UPDATE, cmd.getEventType()); + assertEquals("Updating ipv4 routing firewall rule", cmd.getEventDescription()); + } + + + @Test + public void testExecute() throws Exception { + UpdateRoutingFirewallRuleCmd cmd = new UpdateRoutingFirewallRuleCmd(); + ReflectionTestUtils.setField(cmd, "routedIpv4Manager", routedIpv4Manager); + ReflectionTestUtils.setField(cmd, "_firewallService", _firewallService); + ReflectionTestUtils.setField(cmd, "_responseGenerator", _responseGenerator); + + Long id = 1L; + FirewallRule firewallRule = Mockito.spy(FirewallRule.class); + Mockito.when(routedIpv4Manager.updateRoutingFirewallRule(cmd)).thenReturn(firewallRule); + + FirewallResponse ruleResponse = Mockito.mock(FirewallResponse.class); + Mockito.when(_responseGenerator.createFirewallResponse(firewallRule)).thenReturn(ruleResponse); + + try { + ReflectionTestUtils.setField(cmd, "id", id); + cmd.execute(); + } catch (Exception ignored) { + } + + Assert.assertEquals(ruleResponse, cmd.getResponseObject()); + } +} diff --git 
a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/CreateVMScheduleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/CreateVMScheduleCmdTest.java index c9eb672c9e9..99bc9d2b3fb 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/CreateVMScheduleCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/CreateVMScheduleCmdTest.java @@ -32,8 +32,6 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import java.security.InvalidParameterException; - public class CreateVMScheduleCmdTest { @Mock public VMScheduleManager vmScheduleManager; @@ -70,11 +68,11 @@ public class CreateVMScheduleCmdTest { /** * given: "We have a VMScheduleManager and CreateVMScheduleCmd" * when: "CreateVMScheduleCmd is executed with an invalid parameter" - * then: "an InvalidParameterException is thrown" + * then: "an InvalidParameterValueException is thrown" */ - @Test(expected = InvalidParameterException.class) - public void testInvalidParameterException() { - Mockito.when(vmScheduleManager.createSchedule(createVMScheduleCmd)).thenThrow(InvalidParameterException.class); + @Test(expected = InvalidParameterValueException.class) + public void testInvalidParameterValueException() { + Mockito.when(vmScheduleManager.createSchedule(createVMScheduleCmd)).thenThrow(InvalidParameterValueException.class); createVMScheduleCmd.execute(); } @@ -94,7 +92,7 @@ public class CreateVMScheduleCmdTest { /** * given: "We have an EntityManager and CreateVMScheduleCmd" * when: "CreateVMScheduleCmd.getEntityOwnerId is executed for a VM which doesn't exist" - * then: "an InvalidParameterException is thrown" + * then: "an InvalidParameterValueException is thrown" */ @Test(expected = InvalidParameterValueException.class) public void testFailureGetEntityOwnerId() { diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/DeleteVMScheduleCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/DeleteVMScheduleCmdTest.java index 9b4decc83aa..1f764a84365 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/DeleteVMScheduleCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/DeleteVMScheduleCmdTest.java @@ -34,8 +34,6 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import java.security.InvalidParameterException; - public class DeleteVMScheduleCmdTest { @Mock public VMScheduleManager vmScheduleManager; @@ -89,11 +87,11 @@ public class DeleteVMScheduleCmdTest { /** * given: "We have a VMScheduleManager and DeleteVMScheduleCmd" * when: "DeleteVMScheduleCmd is executed with an invalid parameter" - * then: "an InvalidParameterException is thrown" + * then: "an InvalidParameterValueException is thrown" */ - @Test(expected = InvalidParameterException.class) - public void testInvalidParameterException() { - Mockito.when(vmScheduleManager.removeSchedule(deleteVMScheduleCmd)).thenThrow(InvalidParameterException.class); + @Test(expected = InvalidParameterValueException.class) + public void testInvalidParameterValueException() { + Mockito.when(vmScheduleManager.removeSchedule(deleteVMScheduleCmd)).thenThrow(InvalidParameterValueException.class); deleteVMScheduleCmd.execute(); } @@ -113,7 +111,7 @@ public class DeleteVMScheduleCmdTest { /** * given: "We have an EntityManager and DeleteVMScheduleCmd" * when: "DeleteVMScheduleCmd.getEntityOwnerId is executed for a VM which doesn't exist" - * then: "an InvalidParameterException is thrown" + * then: "an InvalidParameterValueException is thrown" */ @Test(expected = InvalidParameterValueException.class) public void testFailureGetEntityOwnerId() { diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/ListVMScheduleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/ListVMScheduleCmdTest.java index f9a1d945f03..f5434de3581 100644 --- 
a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/ListVMScheduleCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/ListVMScheduleCmdTest.java @@ -18,6 +18,7 @@ */ package org.apache.cloudstack.api.command.user.vm; +import com.cloud.exception.InvalidParameterValueException; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.VMScheduleResponse; import org.apache.cloudstack.vm.schedule.VMScheduleManager; @@ -30,7 +31,6 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import java.security.InvalidParameterException; import java.util.ArrayList; import java.util.Collections; @@ -88,11 +88,11 @@ public class ListVMScheduleCmdTest { /** * given: "We have a VMScheduleManager and ListVMScheduleCmd" * when: "ListVMScheduleCmd is executed with an invalid parameter" - * then: "an InvalidParameterException is thrown" + * then: "an InvalidParameterValueException is thrown" */ - @Test(expected = InvalidParameterException.class) - public void testInvalidParameterException() { - Mockito.when(vmScheduleManager.listSchedule(listVMScheduleCmd)).thenThrow(InvalidParameterException.class); + @Test(expected = InvalidParameterValueException.class) + public void testInvalidParameterValueException() { + Mockito.when(vmScheduleManager.listSchedule(listVMScheduleCmd)).thenThrow(InvalidParameterValueException.class); listVMScheduleCmd.execute(); ListResponse actualResponseObject = (ListResponse) listVMScheduleCmd.getResponseObject(); } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/UpdateVMScheduleCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/UpdateVMScheduleCmdTest.java index 5ce133382f3..2c6c485f25b 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/vm/UpdateVMScheduleCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/vm/UpdateVMScheduleCmdTest.java @@ -33,8 +33,6 
@@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import java.security.InvalidParameterException; - public class UpdateVMScheduleCmdTest { @Mock public VMScheduleManager vmScheduleManager; @@ -71,11 +69,11 @@ public class UpdateVMScheduleCmdTest { /** * given: "We have a VMScheduleManager and UpdateVMScheduleCmd" * when: "UpdateVMScheduleCmd is executed with an invalid parameter" - * then: "an InvalidParameterException is thrown" + * then: "an InvalidParameterValueException is thrown" */ - @Test(expected = InvalidParameterException.class) - public void testInvalidParameterException() { - Mockito.when(vmScheduleManager.updateSchedule(updateVMScheduleCmd)).thenThrow(InvalidParameterException.class); + @Test(expected = InvalidParameterValueException.class) + public void testInvalidParameterValueException() { + Mockito.when(vmScheduleManager.updateSchedule(updateVMScheduleCmd)).thenThrow(InvalidParameterValueException.class); updateVMScheduleCmd.execute(); } @@ -99,7 +97,7 @@ public class UpdateVMScheduleCmdTest { /** * given: "We have an EntityManager and UpdateVMScheduleCmd" * when: "UpdateVMScheduleCmd.getEntityOwnerId is executed for a VM Schedule which doesn't exist" - * then: "an InvalidParameterException is thrown" + * then: "an InvalidParameterValueException is thrown" */ @Test(expected = InvalidParameterValueException.class) public void testFailureGetEntityOwnerId() { diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmdTest.java index 79f27fd6687..2505c67e87d 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmdTest.java @@ -86,6 +86,20 @@ public class CreateVPCCmdTest extends TestCase { Assert.assertEquals(cmd.getCidr(), cidr); } + @Test + public void testGetCidrSize() { + int 
cidrSize = 24; + ReflectionTestUtils.setField(cmd, "cidrSize", cidrSize); + Assert.assertEquals(cidrSize, (int) cmd.getCidrSize()); + } + + @Test + public void testAsNumber() { + long asNumber = 10000; + ReflectionTestUtils.setField(cmd, "asNumber", asNumber); + Assert.assertEquals(asNumber, (long) cmd.getAsNumber()); + } + @Test public void testGetDisplayText() { String displayText = "VPC Network"; @@ -167,17 +181,16 @@ public class CreateVPCCmdTest extends TestCase { @Test public void testExecute() throws ResourceUnavailableException, InsufficientCapacityException { - ReflectionTestUtils.setField(cmd, "start", true); Vpc vpc = Mockito.mock(Vpc.class); VpcResponse response = Mockito.mock(VpcResponse.class); ReflectionTestUtils.setField(cmd, "id", 1L); responseGenerator = Mockito.mock(ResponseGenerator.class); - Mockito.when(_vpcService.startVpc(1L, true)).thenReturn(true); + Mockito.doNothing().when(_vpcService).startVpc(cmd); Mockito.when(_entityMgr.findById(Mockito.eq(Vpc.class), Mockito.any(Long.class))).thenReturn(vpc); cmd._responseGenerator = responseGenerator; Mockito.when(responseGenerator.createVpcResponse(ResponseObject.ResponseView.Restricted, vpc)).thenReturn(response); cmd.execute(); - Mockito.verify(_vpcService, Mockito.times(1)).startVpc(Mockito.anyLong(), Mockito.anyBoolean()); + Mockito.verify(_vpcService, Mockito.times(1)).startVpc(cmd); } } diff --git a/api/src/test/java/org/apache/cloudstack/api/response/ASNRangeResponseTest.java b/api/src/test/java/org/apache/cloudstack/api/response/ASNRangeResponseTest.java new file mode 100644 index 00000000000..50248383b4f --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/response/ASNRangeResponseTest.java @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Date; + +@RunWith(MockitoJUnitRunner.class) +public final class ASNRangeResponseTest { + + private static String uuid = "uuid"; + private static String zoneId = "zoneid"; + private static long startASNumber = 10; + private static long endASNumber = 20; + private static Date created = new Date(); + + @Test + public void testASNRangeResponse() { + final ASNRangeResponse response = new ASNRangeResponse(); + + response.setId(uuid); + response.setZoneId(zoneId); + response.setStartASNumber(startASNumber); + response.setEndASNumber(endASNumber); + response.setCreated(created); + + Assert.assertEquals(uuid, response.getId()); + Assert.assertEquals(zoneId, response.getZoneId()); + Assert.assertEquals(startASNumber, (long) response.getStartASNumber()); + Assert.assertEquals(endASNumber, (long) response.getEndASNumber()); + Assert.assertEquals(created, response.getCreated()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/response/ASNumberResponseTest.java b/api/src/test/java/org/apache/cloudstack/api/response/ASNumberResponseTest.java new file mode 100644 index 00000000000..9515984134e --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/response/ASNumberResponseTest.java 
@@ -0,0 +1,92 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Date; + +@RunWith(MockitoJUnitRunner.class) +public final class ASNumberResponseTest { + + private static String uuid = "uuid"; + private static String accountId = "account-id"; + private static String accountName = "account-name"; + private static String domainId = "domain-uuid"; + private static String domainName = "domain-name"; + private static Long asNumber = 15000L; + private static String asNumberRangeId = "as-number-range-uuid"; + private static String asNumberRange = "10000-20000"; + private static String zoneId = "zone-id"; + private static String zoneName = "zone-name"; + private static Date allocated = new Date(); + private static String allocationState = "allocated"; + + private static String associatedNetworkId = "network-id"; + + private static String associatedNetworkName = "network-name"; + + private static String vpcId = "vpc-uuid"; + private static String vpcName = "vpc-name"; + private static Date created = new Date(); + + + + @Test + 
public void testASNumberResponse() { + final ASNumberResponse response = new ASNumberResponse(); + + response.setId(uuid); + response.setAccountId(accountId); + response.setAccountName(accountName); + response.setDomainId(domainId); + response.setDomainName(domainName); + response.setAsNumber(asNumber); + response.setAsNumberRangeId(asNumberRangeId); + response.setAsNumberRange(asNumberRange); + response.setZoneId(zoneId); + response.setZoneName(zoneName); + response.setAllocated(allocated); + response.setAllocationState(allocationState); + response.setAssociatedNetworkId(associatedNetworkId); + response.setAssociatedNetworkName(associatedNetworkName); + response.setVpcId(vpcId); + response.setVpcName(vpcName); + response.setCreated(created); + + Assert.assertEquals(uuid, response.getId()); + Assert.assertEquals(accountId, response.getAccountId()); + Assert.assertEquals(accountName, response.getAccountName()); + Assert.assertEquals(domainId, response.getDomainId()); + Assert.assertEquals(domainName, response.getDomainName()); + Assert.assertEquals(asNumber, response.getAsNumber()); + Assert.assertEquals(asNumberRangeId, response.getAsNumberRangeId()); + Assert.assertEquals(asNumberRange, response.getAsNumberRange()); + Assert.assertEquals(zoneId, response.getZoneId()); + Assert.assertEquals(zoneName, response.getZoneName()); + Assert.assertEquals(allocated, response.getAllocated()); + Assert.assertEquals(allocationState, response.getAllocationState()); + Assert.assertEquals(associatedNetworkId, response.getAssociatedNetworkId()); + Assert.assertEquals(associatedNetworkName, response.getAssociatedNetworkName()); + Assert.assertEquals(vpcId, response.getVpcId()); + Assert.assertEquals(vpcName, response.getVpcName()); + Assert.assertEquals(created, response.getCreated()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/response/BgpPeerResponseTest.java b/api/src/test/java/org/apache/cloudstack/api/response/BgpPeerResponseTest.java new file mode 100644 
index 00000000000..7c82eb84368 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/response/BgpPeerResponseTest.java @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Date; +import java.util.HashMap; +import java.util.Map; + +@RunWith(MockitoJUnitRunner.class) +public final class BgpPeerResponseTest { + + private static String uuid = "uuid"; + private static String ip4Address = "ip4-address"; + private static String ip6Address = "ip6-address"; + private static Long asNumber = 15000L; + private static String password = "password"; + private static String accountName = "account-name"; + private static String domainId = "domain-uuid"; + private static String domainName = "domain-name"; + private static String projectId = "project-uuid"; + private static String projectName = "project-name"; + private static String zoneId = "zone-id"; + private static String zoneName = "zone-name"; + private static Date created = new Date(); + + @Test + public void testBgpPeerResponse() { + final BgpPeerResponse 
response = new BgpPeerResponse(); + + response.setId(uuid); + response.setIp4Address(ip4Address); + response.setIp6Address(ip6Address); + response.setAsNumber(asNumber); + response.setPassword(password); + response.setAccountName(accountName); + response.setDomainId(domainId); + response.setDomainName(domainName); + response.setProjectId(projectId); + response.setProjectName(projectName); + response.setZoneId(zoneId); + response.setZoneName(zoneName); + response.setCreated(created); + Map details = new HashMap<>(); + details.put("key", "value"); + response.setDetails(details); + + Assert.assertEquals(uuid, response.getId()); + Assert.assertEquals(ip4Address, response.getIp4Address()); + Assert.assertEquals(ip6Address, response.getIp6Address()); + Assert.assertEquals(asNumber, response.getAsNumber()); + Assert.assertEquals(password, response.getPassword()); + Assert.assertEquals(accountName, response.getAccountName()); + Assert.assertEquals(domainId, response.getDomainId()); + Assert.assertEquals(domainName, response.getDomainName()); + Assert.assertEquals(projectId, response.getProjectId()); + Assert.assertEquals(projectName, response.getProjectName()); + Assert.assertEquals(zoneId, response.getZoneId()); + Assert.assertEquals(zoneName, response.getZoneName()); + Assert.assertEquals(created, response.getCreated()); + Assert.assertEquals(details, response.getDetails()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/response/DataCenterIpv4SubnetResponseTest.java b/api/src/test/java/org/apache/cloudstack/api/response/DataCenterIpv4SubnetResponseTest.java new file mode 100644 index 00000000000..add9544de7d --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/response/DataCenterIpv4SubnetResponseTest.java @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Date; + +@RunWith(MockitoJUnitRunner.class) +public final class DataCenterIpv4SubnetResponseTest { + + private static String uuid = "uuid"; + private static String subnet = "10.10.10.0/26"; + private static String accountName = "account-name"; + private static String domainId = "domain-uuid"; + private static String domainName = "domain-name"; + private static String projectId = "project-uuid"; + private static String projectName = "project-name"; + private static String zoneId = "zone-id"; + private static String zoneName = "zone-name"; + private static Date created = new Date(); + + @Test + public void testDataCenterIpv4SubnetResponse() { + final DataCenterIpv4SubnetResponse response = new DataCenterIpv4SubnetResponse(); + + response.setId(uuid); + response.setSubnet(subnet); + response.setAccountName(accountName); + response.setDomainId(domainId); + response.setDomainName(domainName); + response.setProjectId(projectId); + response.setProjectName(projectName); + response.setZoneId(zoneId); + response.setZoneName(zoneName); + response.setCreated(created); + + Assert.assertEquals(uuid, response.getId()); + Assert.assertEquals(subnet, response.getSubnet()); + Assert.assertEquals(accountName, 
response.getAccountName()); + Assert.assertEquals(domainId, response.getDomainId()); + Assert.assertEquals(domainName, response.getDomainName()); + Assert.assertEquals(projectId, response.getProjectId()); + Assert.assertEquals(projectName, response.getProjectName()); + Assert.assertEquals(zoneId, response.getZoneId()); + Assert.assertEquals(zoneName, response.getZoneName()); + Assert.assertEquals(created, response.getCreated()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/response/HostResponseTest.java b/api/src/test/java/org/apache/cloudstack/api/response/HostResponseTest.java index 523b3de9e3c..04e3ad7df60 100644 --- a/api/src/test/java/org/apache/cloudstack/api/response/HostResponseTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/response/HostResponseTest.java @@ -23,6 +23,8 @@ import org.junit.Test; import java.util.HashMap; import java.util.Map; +import com.cloud.hypervisor.Hypervisor; + public final class HostResponseTest extends TestCase { private static final String VALID_KEY = "validkey"; @@ -32,7 +34,7 @@ public final class HostResponseTest extends TestCase { public void testSetDetailsNull() { final HostResponse hostResponse = new HostResponse(); - hostResponse.setDetails(null); + hostResponse.setDetails(null, null); assertEquals(null, hostResponse.getDetails()); @@ -51,7 +53,7 @@ public final class HostResponseTest extends TestCase { final Map expectedDetails = new HashedMap(); expectedDetails.put(VALID_KEY, VALID_VALUE); - hostResponse.setDetails(details); + hostResponse.setDetails(details, Hypervisor.HypervisorType.KVM); final Map actualDetails = hostResponse.getDetails(); assertTrue(details != actualDetails); @@ -70,7 +72,7 @@ public final class HostResponseTest extends TestCase { final Map expectedDetails = new HashedMap(); expectedDetails.put(VALID_KEY, VALID_VALUE); - hostResponse.setDetails(details); + hostResponse.setDetails(details, Hypervisor.HypervisorType.KVM); final Map actualDetails = 
hostResponse.getDetails(); assertTrue(details != actualDetails); diff --git a/api/src/test/java/org/apache/cloudstack/api/response/Ipv4RouteResponseTest.java b/api/src/test/java/org/apache/cloudstack/api/response/Ipv4RouteResponseTest.java new file mode 100644 index 00000000000..717668d054e --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/response/Ipv4RouteResponseTest.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public final class Ipv4RouteResponseTest { + + private static String subnet = "10.10.10.0/24"; + private static String gateway = "10.10.10.1"; + + @Test + public void testIpv4RouteResponse() { + final Ipv4RouteResponse response = new Ipv4RouteResponse(subnet, gateway); + + Assert.assertEquals(subnet, response.getSubnet()); + Assert.assertEquals(gateway, response.getGateway()); + } + + @Test + public void testIpv4RouteResponse2() { + final Ipv4RouteResponse response = new Ipv4RouteResponse(); + + response.setSubnet(subnet); + response.setGateway(gateway); + + Assert.assertEquals(subnet, response.getSubnet()); + Assert.assertEquals(gateway, response.getGateway()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/response/Ipv4SubnetForGuestNetworkResponseTest.java b/api/src/test/java/org/apache/cloudstack/api/response/Ipv4SubnetForGuestNetworkResponseTest.java new file mode 100644 index 00000000000..6fb5141e7a9 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/response/Ipv4SubnetForGuestNetworkResponseTest.java @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Date; + +@RunWith(MockitoJUnitRunner.class) +public final class Ipv4SubnetForGuestNetworkResponseTest { + + private static String uuid = "uuid"; + private static String parentId = "parent-id"; + private static String parentSubnet = "10.10.0.0/20"; + private static String subnet = "10.10.0.0/24"; + private static String state = "Allocating"; + + private static String zoneId = "zone-id"; + private static String zoneName = "zone-name"; + private static Date allocated = new Date(); + private static String networkId = "network-id"; + private static String networkName = "network-name"; + private static String vpcId = "vpc-uuid"; + private static String vpcName = "vpc-name"; + private static Date created = new Date(); + private static Date removed = new Date(); + + + + @Test + public void testIpv4SubnetForGuestNetworkResponse() { + final Ipv4SubnetForGuestNetworkResponse response = new Ipv4SubnetForGuestNetworkResponse(); + + response.setId(uuid); + response.setSubnet(subnet); + response.setParentId(parentId); + response.setParentSubnet(parentSubnet); + response.setState(state); + response.setZoneId(zoneId); + response.setZoneName(zoneName); + response.setAllocatedTime(allocated); + response.setNetworkId(networkId); + response.setNetworkName(networkName); + response.setVpcId(vpcId); + response.setVpcName(vpcName); + response.setCreated(created); + response.setRemoved(removed); + + Assert.assertEquals(uuid, response.getId()); + Assert.assertEquals(subnet, response.getSubnet()); + Assert.assertEquals(parentId, response.getParentId()); + Assert.assertEquals(parentSubnet, response.getParentSubnet()); + Assert.assertEquals(state, response.getState()); + 
Assert.assertEquals(zoneId, response.getZoneId()); + Assert.assertEquals(zoneName, response.getZoneName()); + Assert.assertEquals(allocated, response.getAllocatedTime()); + Assert.assertEquals(networkId, response.getNetworkId()); + Assert.assertEquals(networkName, response.getNetworkName()); + Assert.assertEquals(vpcId, response.getVpcId()); + Assert.assertEquals(vpcName, response.getVpcName()); + Assert.assertEquals(created, response.getCreated()); + Assert.assertEquals(removed, response.getRemoved()); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/network/BgpPeerTOTest.java b/api/src/test/java/org/apache/cloudstack/network/BgpPeerTOTest.java new file mode 100644 index 00000000000..2d1f8868ffc --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/network/BgpPeerTOTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.network; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +public class BgpPeerTOTest { + + private static Long peerId = 100L; + private static String ip4Address = "ip4-address"; + private static String ip6Address = "ip6-address"; + private static Long peerAsNumber = 15000L; + private static String peerPassword = "peer-password"; + private static Long networkId = 200L; + private static Long networkAsNumber = 20000L; + private static String guestIp4Cidr = "10.10.10.0/24"; + private static String guestIp6Cidr = "fd00:1111:2222:3333::1/64"; + + @Test + public void testBgpPeerTO1() { + BgpPeerTO bgpPeerTO = new BgpPeerTO(networkId); + + Assert.assertEquals(networkId, bgpPeerTO.getNetworkId()); + } + + @Test + public void testBgpPeerTO2() { + Map details = new HashMap<>(); + details.put(BgpPeer.Detail.EBGP_MultiHop, "100"); + + BgpPeerTO bgpPeerTO = new BgpPeerTO(peerId, ip4Address, ip6Address, peerAsNumber, peerPassword, + networkId, networkAsNumber, guestIp4Cidr, guestIp6Cidr, details); + + Assert.assertEquals(peerId, bgpPeerTO.getPeerId()); + Assert.assertEquals(peerAsNumber, bgpPeerTO.getPeerAsNumber()); + Assert.assertEquals(ip4Address, bgpPeerTO.getIp4Address()); + Assert.assertEquals(ip6Address, bgpPeerTO.getIp6Address()); + Assert.assertEquals(peerPassword, bgpPeerTO.getPeerPassword()); + Assert.assertEquals(networkId, bgpPeerTO.getNetworkId()); + Assert.assertEquals(networkAsNumber, bgpPeerTO.getNetworkAsNumber()); + Assert.assertEquals(guestIp4Cidr, bgpPeerTO.getGuestIp4Cidr()); + Assert.assertEquals(guestIp6Cidr, bgpPeerTO.getGuestIp6Cidr()); + + Assert.assertNotNull(bgpPeerTO.getDetails()); + details = bgpPeerTO.getDetails(); + Assert.assertEquals(1, details.size()); + Assert.assertEquals("100", details.get(BgpPeer.Detail.EBGP_MultiHop)); + } +} diff --git a/build/replace.properties b/build/replace.properties index 3d9a4597060..ce38727b80a 100644 --- 
a/build/replace.properties +++ b/build/replace.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/client/conf/db.properties.in b/client/conf/db.properties.in index 8f31aff17e6..0f7d2706a42 100644 --- a/client/conf/db.properties.in +++ b/client/conf/db.properties.in @@ -34,10 +34,14 @@ db.cloud.uri= # CloudStack database tuning parameters +db.cloud.connectionPoolLib=hikaricp db.cloud.maxActive=250 db.cloud.maxIdle=30 -db.cloud.maxWait=10000 -db.cloud.validationQuery=SELECT 1 +db.cloud.maxWait=600000 +db.cloud.minIdleConnections=5 +db.cloud.connectionTimeout=30000 +db.cloud.keepAliveTime=600000 +db.cloud.validationQuery=/* ping */ SELECT 1 db.cloud.testOnBorrow=true db.cloud.testWhileIdle=true db.cloud.timeBetweenEvictionRunsMillis=40000 @@ -70,9 +74,13 @@ db.usage.uri= # usage database tuning parameters +db.usage.connectionPoolLib=hikaricp db.usage.maxActive=100 db.usage.maxIdle=30 -db.usage.maxWait=10000 +db.usage.maxWait=600000 +db.usage.minIdleConnections=5 +db.usage.connectionTimeout=30000 +db.usage.keepAliveTime=600000 db.usage.url.params=serverTimezone=UTC # Simulator database settings @@ -82,9 +90,13 @@ db.simulator.host=@DBHOST@ db.simulator.driver=@DBDRIVER@ db.simulator.port=3306 db.simulator.name=simulator +db.simulator.connectionPoolLib=hikaricp db.simulator.maxActive=250 db.simulator.maxIdle=30 -db.simulator.maxWait=10000 +db.simulator.maxWait=600000 +db.simulator.minIdleConnections=5 +db.simulator.connectionTimeout=30000 +db.simulator.keepAliveTime=600000 db.simulator.autoReconnect=true # Connection URI to the database "simulator". 
When this property is set, only the following properties will be used along with it: db.simulator.host, db.simulator.port, db.simulator.name, db.simulator.autoReconnect. Other properties will be ignored. diff --git a/client/conf/log4j-cloud.xml.in b/client/conf/log4j-cloud.xml.in index dbcf8c6198b..26da171269d 100755 --- a/client/conf/log4j-cloud.xml.in +++ b/client/conf/log4j-cloud.xml.in @@ -34,7 +34,7 @@ under the License. - + @@ -43,7 +43,7 @@ under the License. - + @@ -52,7 +52,7 @@ under the License. - + @@ -61,7 +61,7 @@ under the License. - + @@ -69,7 +69,7 @@ under the License. - + diff --git a/client/pom.xml b/client/pom.xml index 1d11fa74650..d89dbbbbe9d 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -27,18 +27,7 @@ cloudstack 4.20.0.0-SNAPSHOT - - - juniper-tungsten-api - https://github.com/radu-todirica/tungsten-api/raw/master - - - - net.juniper.tungsten - juniper-tungsten-api - 2.0 - javax.servlet javax.servlet-api @@ -282,11 +271,6 @@ cloud-plugin-network-ovs ${project.version} - - org.apache.cloudstack - cloud-plugin-network-tungsten - ${project.version} - org.apache.cloudstack cloud-plugin-network-elb @@ -438,6 +422,11 @@ cloud-mom-kafka ${project.version} + + org.apache.cloudstack + cloud-mom-webhook + ${project.version} + org.apache.cloudstack cloud-framework-agent-lb @@ -623,6 +612,11 @@ cloud-plugin-backup-networker ${project.version} + + org.apache.cloudstack + cloud-plugin-backup-nas + ${project.version} + org.apache.cloudstack cloud-plugin-integrations-kubernetes-service @@ -643,11 +637,21 @@ cloud-plugin-storage-object-minio ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-object-ceph + ${project.version} + org.apache.cloudstack cloud-plugin-storage-object-simulator ${project.version} + + org.apache.cloudstack + cloud-plugin-sharedfs-provider-storagevm + ${project.version} + org.apache.cloudstack cloud-usage @@ -1093,6 +1097,11 @@ cloud-plugin-network-nsx ${project.version} + + org.apache.cloudstack + 
cloud-plugin-network-tungsten + ${project.version} + org.apache.cloudstack cloud-plugin-api-vmware-sioc diff --git a/client/src/main/resources/META-INF/cloudstack/webApplicationContext.xml b/client/src/main/resources/META-INF/cloudstack/webApplicationContext.xml index 3b3c6dbe35c..e1c46268618 100644 --- a/client/src/main/resources/META-INF/cloudstack/webApplicationContext.xml +++ b/client/src/main/resources/META-INF/cloudstack/webApplicationContext.xml @@ -25,7 +25,7 @@ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd" - > + > diff --git a/core/src/main/java/com/cloud/agent/api/CheckConvertInstanceAnswer.java b/core/src/main/java/com/cloud/agent/api/CheckConvertInstanceAnswer.java new file mode 100644 index 00000000000..a02ac92927a --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CheckConvertInstanceAnswer.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api; + +public class CheckConvertInstanceAnswer extends Answer { + + private boolean ovfExportSupported = false; + + public CheckConvertInstanceAnswer() { + super(); + } + + public CheckConvertInstanceAnswer(Command command, boolean success) { + super(command, success, ""); + } + + public CheckConvertInstanceAnswer(Command command, boolean success, String details) { + super(command, success, details); + } + + public CheckConvertInstanceAnswer(Command command, boolean success, boolean ovfExportSupported, String details) { + super(command, success, details); + this.ovfExportSupported = ovfExportSupported; + } + + public boolean isOvfExportSupported() { + return ovfExportSupported; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CheckConvertInstanceCommand.java b/core/src/main/java/com/cloud/agent/api/CheckConvertInstanceCommand.java new file mode 100644 index 00000000000..fc066e5c589 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/CheckConvertInstanceCommand.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api; + +public class CheckConvertInstanceCommand extends Command { + boolean checkWindowsGuestConversionSupport = false; + + public CheckConvertInstanceCommand() { + } + + public CheckConvertInstanceCommand(boolean checkWindowsGuestConversionSupport) { + this.checkWindowsGuestConversionSupport = checkWindowsGuestConversionSupport; + } + + @Override + public boolean executeInSequence() { + return false; + } + + public boolean getCheckWindowsGuestConversionSupport() { + return checkWindowsGuestConversionSupport; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java b/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java index dd136d8642f..5a32ab59a7a 100644 --- a/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/CheckVolumeAnswer.java @@ -17,7 +17,6 @@ package com.cloud.agent.api; -@LogLevel(LogLevel.Log4jLevel.Trace) public class CheckVolumeAnswer extends Answer { private long size; diff --git a/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java index b4036bebf3a..bd44b35c895 100644 --- a/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java +++ b/core/src/main/java/com/cloud/agent/api/CheckVolumeCommand.java @@ -21,7 +21,6 @@ package com.cloud.agent.api; import com.cloud.agent.api.to.StorageFilerTO; -@LogLevel(LogLevel.Log4jLevel.Trace) public class CheckVolumeCommand extends Command { String srcFile; diff --git a/core/src/main/java/com/cloud/agent/api/ConvertInstanceCommand.java b/core/src/main/java/com/cloud/agent/api/ConvertInstanceCommand.java index 63234b04480..b8250903f85 100644 --- a/core/src/main/java/com/cloud/agent/api/ConvertInstanceCommand.java +++ b/core/src/main/java/com/cloud/agent/api/ConvertInstanceCommand.java @@ -28,16 +28,24 @@ public class ConvertInstanceCommand extends Command { private Hypervisor.HypervisorType destinationHypervisorType; 
private List destinationStoragePools; private DataStoreTO conversionTemporaryLocation; + private String templateDirOnConversionLocation; + private boolean checkConversionSupport; + private boolean exportOvfToConversionLocation; + private int threadsCountToExportOvf = 0; public ConvertInstanceCommand() { } public ConvertInstanceCommand(RemoteInstanceTO sourceInstance, Hypervisor.HypervisorType destinationHypervisorType, - List destinationStoragePools, DataStoreTO conversionTemporaryLocation) { + List destinationStoragePools, DataStoreTO conversionTemporaryLocation, + String templateDirOnConversionLocation, boolean checkConversionSupport, boolean exportOvfToConversionLocation) { this.sourceInstance = sourceInstance; this.destinationHypervisorType = destinationHypervisorType; this.destinationStoragePools = destinationStoragePools; this.conversionTemporaryLocation = conversionTemporaryLocation; + this.templateDirOnConversionLocation = templateDirOnConversionLocation; + this.checkConversionSupport = checkConversionSupport; + this.exportOvfToConversionLocation = exportOvfToConversionLocation; } public RemoteInstanceTO getSourceInstance() { @@ -56,6 +64,26 @@ public class ConvertInstanceCommand extends Command { return conversionTemporaryLocation; } + public String getTemplateDirOnConversionLocation() { + return templateDirOnConversionLocation; + } + + public boolean getCheckConversionSupport() { + return checkConversionSupport; + } + + public boolean getExportOvfToConversionLocation() { + return exportOvfToConversionLocation; + } + + public int getThreadsCountToExportOvf() { + return threadsCountToExportOvf; + } + + public void setThreadsCountToExportOvf(int threadsCountToExportOvf) { + this.threadsCountToExportOvf = threadsCountToExportOvf; + } + @Override public boolean executeInSequence() { return false; diff --git a/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java index 
f6d7cab4596..e79005be71b 100644 --- a/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeAnswer.java @@ -17,7 +17,6 @@ package com.cloud.agent.api; -@LogLevel(LogLevel.Log4jLevel.Trace) public class CopyRemoteVolumeAnswer extends Answer { private String remoteIp; diff --git a/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java index 82bc4d7cb45..798336b0e72 100644 --- a/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java +++ b/core/src/main/java/com/cloud/agent/api/CopyRemoteVolumeCommand.java @@ -21,16 +21,13 @@ package com.cloud.agent.api; import com.cloud.agent.api.to.StorageFilerTO; -@LogLevel(LogLevel.Log4jLevel.Trace) public class CopyRemoteVolumeCommand extends Command { - String remoteIp; String username; + @LogLevel(LogLevel.Log4jLevel.Off) String password; String srcFile; - String tmpPath; - StorageFilerTO storageFilerTO; public CopyRemoteVolumeCommand(String remoteIp, String username, String password) { diff --git a/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java index 8cd072f1da1..c4e590591d0 100644 --- a/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsAnswer.java @@ -22,10 +22,10 @@ import org.apache.cloudstack.vm.UnmanagedInstanceTO; import java.util.HashMap; import java.util.List; -@LogLevel(LogLevel.Log4jLevel.Trace) public class GetRemoteVmsAnswer extends Answer { private String remoteIp; + @LogLevel(LogLevel.Log4jLevel.Trace) private HashMap unmanagedInstances; List vmNames; diff --git a/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java index 5c71d12dbd0..5b6b9bdd360 100644 --- a/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java 
+++ b/core/src/main/java/com/cloud/agent/api/GetRemoteVmsCommand.java @@ -19,11 +19,11 @@ package com.cloud.agent.api; -@LogLevel(LogLevel.Log4jLevel.Trace) public class GetRemoteVmsCommand extends Command { String remoteIp; String username; + @LogLevel(LogLevel.Log4jLevel.Off) String password; public GetRemoteVmsCommand(String remoteIp, String username, String password) { diff --git a/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java b/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java index 771d472be2a..950930ec614 100644 --- a/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesAnswer.java @@ -21,10 +21,10 @@ import java.util.HashMap; import org.apache.cloudstack.vm.UnmanagedInstanceTO; -@LogLevel(LogLevel.Log4jLevel.Trace) public class GetUnmanagedInstancesAnswer extends Answer { private String instanceName; + @LogLevel(LogLevel.Log4jLevel.Trace) private HashMap unmanagedInstances; GetUnmanagedInstancesAnswer() { diff --git a/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesCommand.java b/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesCommand.java index 2cd80aebea1..c0b8987e152 100644 --- a/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesCommand.java +++ b/core/src/main/java/com/cloud/agent/api/GetUnmanagedInstancesCommand.java @@ -28,10 +28,10 @@ import org.apache.commons.collections.CollectionUtils; * All managed instances will be filtered while trying to find unmanaged instances. 
*/ -@LogLevel(LogLevel.Log4jLevel.Trace) public class GetUnmanagedInstancesCommand extends Command { String instanceName; + @LogLevel(LogLevel.Log4jLevel.Trace) List managedInstancesNames; public GetUnmanagedInstancesCommand() { diff --git a/core/src/main/java/com/cloud/agent/api/GetVolumeStatAnswer.java b/core/src/main/java/com/cloud/agent/api/GetVolumeStatAnswer.java new file mode 100644 index 00000000000..8352c97c108 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/GetVolumeStatAnswer.java @@ -0,0 +1,85 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +import com.cloud.agent.api.LogLevel.Log4jLevel; +import com.cloud.storage.Storage.StoragePoolType; + +@LogLevel(Log4jLevel.Trace) +public class GetVolumeStatAnswer extends Answer { + String poolUuid; + StoragePoolType poolType; + String volumePath; + long size = 0; + long virtualSize = 0; + + public GetVolumeStatAnswer(GetVolumeStatCommand cmd, long size, long virtualSize) { + super(cmd, true, ""); + this.poolUuid = cmd.getPoolUuid(); + this.poolType = cmd.getPoolType(); + this.volumePath = cmd.getVolumePath(); + this.size = size; + this.virtualSize = virtualSize; + } + + public GetVolumeStatAnswer(GetVolumeStatCommand cmd, boolean success, String details) { + super(cmd, success, details); + } + + protected GetVolumeStatAnswer() { + //no-args constructor for json serialization-deserialization + } + + public String getPoolUuid() { + return poolUuid; + } + + public void setPoolUuid(String poolUuid) { + this.poolUuid = poolUuid; + } + + public StoragePoolType getPoolType() { + return poolType; + } + + public void setPoolType(StoragePoolType poolType) { + this.poolType = poolType; + } + + public long getSize() { + return size; + } + + public void setSize(long size) { + this.size = size; + } + + public long getVirtualSize() { + return virtualSize; + } + + public void setVirtualSize(long virtualSize) { + this.virtualSize = virtualSize; + } + + public String getString() { + return "GetVolumeStatAnswer [poolUuid=" + poolUuid + ", poolType=" + poolType + ", volumePath=" + volumePath + ", size=" + size + ", virtualSize=" + virtualSize + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/GetVolumeStatCommand.java b/core/src/main/java/com/cloud/agent/api/GetVolumeStatCommand.java new file mode 100644 index 00000000000..1be3d6a0419 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/GetVolumeStatCommand.java @@ -0,0 +1,72 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor 
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +import com.cloud.agent.api.LogLevel.Log4jLevel; +import com.cloud.storage.Storage.StoragePoolType; + +@LogLevel(Log4jLevel.Trace) +public class GetVolumeStatCommand extends Command { + String volumePath; + StoragePoolType poolType; + String poolUuid; + + protected GetVolumeStatCommand() { + } + + public GetVolumeStatCommand(String volumePath, StoragePoolType poolType, String poolUuid) { + this.volumePath = volumePath; + this.poolType = poolType; + this.poolUuid = poolUuid; + } + + public String getVolumePath() { + return volumePath; + } + + public void setVolumePath(String volumePath) { + this.volumePath = volumePath; + } + + public StoragePoolType getPoolType() { + return poolType; + } + + public void setPoolType(StoragePoolType poolType) { + this.poolType = poolType; + } + + public String getPoolUuid() { + return poolUuid; + } + + public void setPoolUuid(String storeUuid) { + this.poolUuid = storeUuid; + } + + @Override + public boolean executeInSequence() { + return false; + } + + public String getString() { + return "GetVolumeStatCommand [volumePath=" + volumePath + ", poolType=" + poolType + ", poolUuid=" + poolUuid + "]"; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolCommand.java 
b/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolCommand.java index ad05fe1d615..06940266b53 100644 --- a/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolCommand.java +++ b/core/src/main/java/com/cloud/agent/api/ModifyStoragePoolCommand.java @@ -46,6 +46,10 @@ public class ModifyStoragePoolCommand extends Command { this.details = details; } + public ModifyStoragePoolCommand(boolean add, StoragePool pool, Map details) { + this(add, pool, LOCAL_PATH_PREFIX + File.separator + UUID.nameUUIDFromBytes((pool.getHostAddress() + pool.getPath()).getBytes()), details); + } + public ModifyStoragePoolCommand(boolean add, StoragePool pool) { this(add, pool, LOCAL_PATH_PREFIX + File.separator + UUID.nameUUIDFromBytes((pool.getHostAddress() + pool.getPath()).getBytes())); } diff --git a/core/src/main/java/com/cloud/agent/api/PrepareStorageClientAnswer.java b/core/src/main/java/com/cloud/agent/api/PrepareStorageClientAnswer.java new file mode 100644 index 00000000000..85afb925646 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/PrepareStorageClientAnswer.java @@ -0,0 +1,43 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +import java.util.Map; + +public class PrepareStorageClientAnswer extends Answer { + Map detailsMap; + + public PrepareStorageClientAnswer() { + super(); + } + + public PrepareStorageClientAnswer(Command command, boolean success, Map detailsMap) { + super(command, success, ""); + this.detailsMap = detailsMap; + } + + public PrepareStorageClientAnswer(Command command, boolean success, String details) { + super(command, success, details); + } + + public Map getDetailsMap() { + return detailsMap; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/PrepareStorageClientCommand.java b/core/src/main/java/com/cloud/agent/api/PrepareStorageClientCommand.java new file mode 100644 index 00000000000..8dea9c11c53 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/PrepareStorageClientCommand.java @@ -0,0 +1,56 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +import java.util.Map; + +import com.cloud.storage.Storage.StoragePoolType; + +public class PrepareStorageClientCommand extends Command { + private StoragePoolType poolType; + private String poolUuid; + private Map details; + + public PrepareStorageClientCommand() { + } + + public PrepareStorageClientCommand(StoragePoolType poolType, String poolUuid, Map details) { + this.poolType = poolType; + this.poolUuid = poolUuid; + this.details = details; + } + + @Override + public boolean executeInSequence() { + return false; + } + + public StoragePoolType getPoolType() { + return poolType; + } + + public String getPoolUuid() { + return poolUuid; + } + + public Map getDetails() { + return details; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java index 637e4f54da0..42f1d264a50 100644 --- a/core/src/main/java/com/cloud/agent/api/ReadyCommand.java +++ b/core/src/main/java/com/cloud/agent/api/ReadyCommand.java @@ -34,6 +34,7 @@ public class ReadyCommand extends Command { private String lbAlgorithm; private Long lbCheckInterval; private Boolean enableHumanReadableSizes; + private String arch; public ReadyCommand(Long dcId) { super(); @@ -94,4 +95,12 @@ public class ReadyCommand extends Command { public Boolean getEnableHumanReadableSizes() { return enableHumanReadableSizes; } + + public String getArch() { + return arch; + } + + public void setArch(String arch) { + this.arch = arch; + } } diff --git a/core/src/main/java/com/cloud/agent/api/StartupCommand.java b/core/src/main/java/com/cloud/agent/api/StartupCommand.java index 5f2c00d0be6..cca5e16b585 100644 --- a/core/src/main/java/com/cloud/agent/api/StartupCommand.java +++ b/core/src/main/java/com/cloud/agent/api/StartupCommand.java @@ -47,6 +47,7 @@ public class StartupCommand extends Command { String resourceName; String gatewayIpAddress; String msHostList; + String arch; public StartupCommand(Host.Type 
type) { this.type = type; @@ -290,6 +291,14 @@ public class StartupCommand extends Command { this.msHostList = msHostList; } + public String getArch() { + return arch; + } + + public void setArch(String arch) { + this.arch = arch; + } + @Override public boolean executeInSequence() { return false; diff --git a/core/src/main/java/com/cloud/agent/api/StartupRoutingCommand.java b/core/src/main/java/com/cloud/agent/api/StartupRoutingCommand.java index 2d4ed8c9cc4..286fced0c58 100644 --- a/core/src/main/java/com/cloud/agent/api/StartupRoutingCommand.java +++ b/core/src/main/java/com/cloud/agent/api/StartupRoutingCommand.java @@ -32,6 +32,7 @@ public class StartupRoutingCommand extends StartupCommand { Integer cpuSockets; int cpus; long speed; + String cpuArch; long memory; long dom0MinMemory; boolean poolSync; @@ -201,4 +202,12 @@ public class StartupRoutingCommand extends StartupCommand { public void setHostHealthCheckResult(Boolean hostHealthCheckResult) { this.hostHealthCheckResult = hostHealthCheckResult; } + + public String getCpuArch() { + return cpuArch; + } + + public void setCpuArch(String cpuArch) { + this.cpuArch = cpuArch; + } } diff --git a/core/src/main/java/com/cloud/agent/api/UnprepareStorageClientAnswer.java b/core/src/main/java/com/cloud/agent/api/UnprepareStorageClientAnswer.java new file mode 100644 index 00000000000..1280293db0d --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/UnprepareStorageClientAnswer.java @@ -0,0 +1,34 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +public class UnprepareStorageClientAnswer extends Answer { + public UnprepareStorageClientAnswer() { + super(); + } + + public UnprepareStorageClientAnswer(Command command, boolean success) { + super(command, success, ""); + } + + public UnprepareStorageClientAnswer(Command command, boolean success, String details) { + super(command, success, details); + } +} diff --git a/core/src/main/java/com/cloud/agent/api/UnprepareStorageClientCommand.java b/core/src/main/java/com/cloud/agent/api/UnprepareStorageClientCommand.java new file mode 100644 index 00000000000..bebd30ca519 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/UnprepareStorageClientCommand.java @@ -0,0 +1,48 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +import com.cloud.storage.Storage.StoragePoolType; + +public class UnprepareStorageClientCommand extends Command { + private StoragePoolType poolType; + private String poolUuid; + + public UnprepareStorageClientCommand() { + } + + public UnprepareStorageClientCommand(StoragePoolType poolType, String poolUuid) { + this.poolType = poolType; + this.poolUuid = poolUuid; + } + + @Override + public boolean executeInSequence() { + return false; + } + + public StoragePoolType getPoolType() { + return poolType; + } + + public String getPoolUuid() { + return poolUuid; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/routing/SetBgpPeersAnswer.java b/core/src/main/java/com/cloud/agent/api/routing/SetBgpPeersAnswer.java new file mode 100644 index 00000000000..9645b300db5 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/routing/SetBgpPeersAnswer.java @@ -0,0 +1,46 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api.routing; + +import java.util.Arrays; + +import com.cloud.agent.api.Answer; + +public class SetBgpPeersAnswer extends Answer { + String[] results; + + protected SetBgpPeersAnswer() { + } + + public SetBgpPeersAnswer(SetBgpPeersCommand cmd, boolean success, String[] results) { + super(cmd, success, null); + if (results != null) { + assert (cmd.getBpgPeers().length == results.length) : "BGP peers and their results should be the same length"; + this.results = Arrays.copyOf(results, results.length); + } + } + + public String[] getResults() { + if (results != null) { + return Arrays.copyOf(results, results.length); + } + return null; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/routing/SetBgpPeersCommand.java b/core/src/main/java/com/cloud/agent/api/routing/SetBgpPeersCommand.java new file mode 100644 index 00000000000..36371a196c8 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/routing/SetBgpPeersCommand.java @@ -0,0 +1,39 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api.routing; + +import java.util.List; + +import org.apache.cloudstack.network.BgpPeerTO; + +public class SetBgpPeersCommand extends NetworkElementCommand { + BgpPeerTO[] bpgPeers; + + protected SetBgpPeersCommand() { + } + + public SetBgpPeersCommand(List bpgPeers) { + this.bpgPeers = bpgPeers.toArray(new BgpPeerTO[bpgPeers.size()]); + } + + public BgpPeerTO[] getBpgPeers() { + return bpgPeers; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/storage/CreateEntityDownloadURLCommand.java b/core/src/main/java/com/cloud/agent/api/storage/CreateEntityDownloadURLCommand.java index c84eceb0ca2..33403580b6f 100644 --- a/core/src/main/java/com/cloud/agent/api/storage/CreateEntityDownloadURLCommand.java +++ b/core/src/main/java/com/cloud/agent/api/storage/CreateEntityDownloadURLCommand.java @@ -23,18 +23,19 @@ import com.cloud.agent.api.to.DataTO; public class CreateEntityDownloadURLCommand extends AbstractDownloadCommand { - public CreateEntityDownloadURLCommand(String parent, String installPath, String uuid, DataTO data) { // this constructor is for creating template download url + public CreateEntityDownloadURLCommand(String parent, String installPath, String fileName, String filePath, DataTO data) { // this constructor is for creating template download url super(); this.parent = parent; // parent is required as not the template can be child of one of many parents this.installPath = installPath; - this.extractLinkUUID = uuid; + this.filenameInExtractURL = fileName; + this.filepathInExtractURL = filePath; this.data = data; } - public CreateEntityDownloadURLCommand(String installPath, String uuid) { + public CreateEntityDownloadURLCommand(String installPath, String filename) { super(); this.installPath = installPath; - this.extractLinkUUID = uuid; + this.filenameInExtractURL = filename; } public CreateEntityDownloadURLCommand() { @@ -42,7 +43,8 @@ public class CreateEntityDownloadURLCommand extends AbstractDownloadCommand { private 
String installPath; private String parent; - private String extractLinkUUID; + private String filenameInExtractURL; + private String filepathInExtractURL; public DataTO getData() { return data; @@ -75,12 +77,19 @@ public class CreateEntityDownloadURLCommand extends AbstractDownloadCommand { this.parent = parent; } - public String getExtractLinkUUID() { - return extractLinkUUID; + public String getFilenameInExtractURL() { + return filenameInExtractURL; } - public void setExtractLinkUUID(String extractLinkUUID) { - this.extractLinkUUID = extractLinkUUID; + public void setFilenameInExtractURL(String filenameInExtractURL) { + this.filenameInExtractURL = filenameInExtractURL; } + public String getFilepathInExtractURL() { + return filepathInExtractURL; + } + + public void setFilepathInExtractURL(String filepathInExtractURL) { + this.filepathInExtractURL = filepathInExtractURL; + } } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java index 1396a2aa002..7bfbf786e9b 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java @@ -41,6 +41,7 @@ public class VRScripts { public static final String DHCP_CONFIG = "dhcp.json"; public static final String IP_ALIAS_CONFIG = "ip_aliases.json"; public static final String LOAD_BALANCER_CONFIG = "load_balancer.json"; + public static final String BGP_PEERS_CONFIG = "bgp_peers.json"; public static final String SYSTEM_VM_PATCHED = "patched.sh"; public final static String CONFIG_CACHE_LOCATION = "/var/cache/cloud/"; @@ -84,4 +85,5 @@ public class VRScripts { // CKS ISO mount public static final String CKS_ISO_MOUNT_SERVE = "cks_iso.sh"; + public static final String MANAGE_SERVICE = "manage_service.sh"; } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java 
b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 91ba7066fe7..bd632632ae8 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -35,6 +35,7 @@ import java.util.concurrent.locks.ReentrantLock; import javax.naming.ConfigurationException; import com.cloud.agent.api.HandleCksIsoCommand; +import org.apache.cloudstack.agent.routing.ManageServiceCommand; import com.cloud.agent.api.routing.UpdateNetworkCommand; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.network.router.VirtualRouter; @@ -149,6 +150,10 @@ public class VirtualRoutingResource { return execute((HandleCksIsoCommand) cmd); } + if (cmd instanceof ManageServiceCommand) { + return execute((ManageServiceCommand) cmd); + } + if (_vrAggregateCommandsSet.containsKey(routerName)) { _vrAggregateCommandsSet.get(routerName).add(cmd); aggregated = true; @@ -283,6 +288,20 @@ public class VirtualRoutingResource { return new Answer(cmd, new CloudRuntimeException("Failed to update interface mtu")); } + private Answer execute(ManageServiceCommand cmd) { + String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); + String args = cmd.getAction() + " " + cmd.getServiceName(); + ExecutionResult result = _vrDeployer.executeInVR(routerIp, VRScripts.MANAGE_SERVICE, args); + if (result.isSuccess()) { + return new Answer(cmd, true, + String.format("Successfully executed action: %s on service: %s. Details: %s", + cmd.getAction(), cmd.getServiceName(), result.getDetails())); + } else { + return new Answer(cmd, false, String.format("Failed to execute action: %s on service: %s. 
Details: %s", + cmd.getAction(), cmd.getServiceName(), result.getDetails())); + } + } + private ExecutionResult applyConfigToVR(String routerAccessIp, ConfigItem c) { return applyConfigToVR(routerAccessIp, c, VRScripts.VR_SCRIPT_EXEC_TIMEOUT); } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/AbstractConfigItemFacade.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/AbstractConfigItemFacade.java index 46dd801bebf..83dfa2a62ca 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/AbstractConfigItemFacade.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/AbstractConfigItemFacade.java @@ -37,6 +37,7 @@ import com.cloud.agent.api.routing.LoadBalancerConfigCommand; import com.cloud.agent.api.routing.NetworkElementCommand; import com.cloud.agent.api.routing.RemoteAccessVpnCfgCommand; import com.cloud.agent.api.routing.SavePasswordCommand; +import com.cloud.agent.api.routing.SetBgpPeersCommand; import com.cloud.agent.api.routing.SetFirewallRulesCommand; import com.cloud.agent.api.routing.SetIpv6FirewallRulesCommand; import com.cloud.agent.api.routing.SetMonitorServiceCommand; @@ -98,6 +99,7 @@ public abstract class AbstractConfigItemFacade { flyweight.put(SetSourceNatCommand.class, new SetSourceNatConfigItem()); flyweight.put(IpAssocCommand.class, new IpAssociationConfigItem()); flyweight.put(IpAssocVpcCommand.class, new IpAssociationConfigItem()); + flyweight.put(SetBgpPeersCommand.class, new SetBgpPeersConfigItem()); } protected String destinationFile; diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetBgpPeersConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetBgpPeersConfigItem.java new file mode 100644 index 00000000000..68f4275bb6b --- /dev/null +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetBgpPeersConfigItem.java @@ -0,0 +1,46 @@ +// +// Licensed to the Apache 
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.resource.virtualnetwork.facade; + +import java.util.Arrays; +import java.util.List; + +import com.cloud.agent.api.routing.NetworkElementCommand; +import com.cloud.agent.api.routing.SetBgpPeersCommand; +import com.cloud.agent.resource.virtualnetwork.ConfigItem; +import com.cloud.agent.resource.virtualnetwork.VRScripts; +import com.cloud.agent.resource.virtualnetwork.model.BgpPeers; +import com.cloud.agent.resource.virtualnetwork.model.ConfigBase; + +public class SetBgpPeersConfigItem extends AbstractConfigItemFacade { + + @Override + public List generateConfig(final NetworkElementCommand cmd) { + final SetBgpPeersCommand command = (SetBgpPeersCommand) cmd; + return generateConfigItems(new BgpPeers(Arrays.asList(command.getBpgPeers()))); + } + + @Override + protected List generateConfigItems(final ConfigBase configuration) { + destinationFile = VRScripts.BGP_PEERS_CONFIG; + + return super.generateConfigItems(configuration); + } +} diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/BgpPeers.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/BgpPeers.java new file mode 100644 index 00000000000..54a1ab2e091 --- /dev/null 
+++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/BgpPeers.java @@ -0,0 +1,45 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.resource.virtualnetwork.model; + +import org.apache.cloudstack.network.BgpPeerTO; + +import java.util.List; + +public class BgpPeers extends ConfigBase { + private List peers; + + public BgpPeers() { + super(ConfigBase.BGP_PEERS); + } + + public BgpPeers(List bgpPeers) { + super(ConfigBase.BGP_PEERS); + this.peers = bgpPeers; + } + + public List getPeers() { + return peers; + } + + public void setPeers(List bgpPeers) { + this.peers = bgpPeers; + } +} diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/ConfigBase.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/ConfigBase.java index ade80d71384..e370b764f22 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/ConfigBase.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/model/ConfigBase.java @@ -39,6 +39,7 @@ public abstract class ConfigBase { public static final String MONITORSERVICE = "monitorservice"; public static final String DHCP_CONFIG = "dhcpconfig"; public static final String 
LOAD_BALANCER = "loadbalancer"; + public final static String BGP_PEERS = "bgppeers"; private String type = UNKNOWN; diff --git a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java index 9d07fc95c2f..e4b0a7ffff4 100644 --- a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java +++ b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java @@ -485,7 +485,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator { sb.append("\tbind ").append(publicIP).append(":").append(publicPort); result.add(sb.toString()); sb = new StringBuilder(); - sb.append("\t").append("balance ").append(algorithm); + sb.append("\t").append("balance ").append(algorithm.toLowerCase()); result.add(sb.toString()); int i = 0; diff --git a/core/src/main/java/com/cloud/resource/CommandWrapper.java b/core/src/main/java/com/cloud/resource/CommandWrapper.java index a839234117b..72d1348dfe7 100644 --- a/core/src/main/java/com/cloud/resource/CommandWrapper.java +++ b/core/src/main/java/com/cloud/resource/CommandWrapper.java @@ -19,10 +19,13 @@ package com.cloud.resource; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; public abstract class CommandWrapper { protected Logger logger = LogManager.getLogger(getClass()); @@ -33,4 +36,26 @@ public abstract class CommandWrapper*?![]{}~".indexOf(c) != -1) { + sanitized.append('\\'); + } + sanitized.append(c); + } + return sanitized.toString(); + } + + public void removeDpdkPort(String portToRemove) { + logger.debug("Removing DPDK port: " + portToRemove); + int port; + try { + port = Integer.valueOf(portToRemove); + } catch (NumberFormatException nfe) { + throw new 
CloudRuntimeException(String.format("Invalid DPDK port specified: '%s'", portToRemove)); + } + Script.executeCommand("ovs-vsctl", "del-port", String.valueOf(port)); + } } diff --git a/core/src/main/java/org/apache/cloudstack/agent/routing/ManageServiceCommand.java b/core/src/main/java/org/apache/cloudstack/agent/routing/ManageServiceCommand.java new file mode 100644 index 00000000000..c83a5b69574 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/agent/routing/ManageServiceCommand.java @@ -0,0 +1,49 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.agent.routing; + +import com.cloud.agent.api.routing.NetworkElementCommand; + +public class ManageServiceCommand extends NetworkElementCommand { + + String serviceName; + String action; + + @Override + public boolean executeInSequence() { + return true; + } + + protected ManageServiceCommand() { + } + + public ManageServiceCommand(String serviceName, String action) { + this.serviceName = serviceName; + this.action = action; + } + + public String getServiceName() { + return serviceName; + } + + public String getAction() { + return action; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/BackupAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/BackupAnswer.java new file mode 100644 index 00000000000..09f9c562150 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/BackupAnswer.java @@ -0,0 +1,59 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; + +import java.util.Map; + +public class BackupAnswer extends Answer { + private Long size; + private Long virtualSize; + private Map volumes; + + public BackupAnswer(final Command command, final boolean success, final String details) { + super(command, success, details); + } + + public Long getSize() { + return size; + } + + public void setSize(Long size) { + this.size = size; + } + + public Long getVirtualSize() { + return virtualSize; + } + + public void setVirtualSize(Long virtualSize) { + this.virtualSize = virtualSize; + } + + public Map getVolumes() { + return volumes; + } + + public void setVolumes(Map volumes) { + this.volumes = volumes; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/DeleteBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/DeleteBackupCommand.java new file mode 100644 index 00000000000..16c611af998 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/DeleteBackupCommand.java @@ -0,0 +1,76 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; + +public class DeleteBackupCommand extends Command { + private String backupPath; + private String backupRepoType; + private String backupRepoAddress; + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + + public DeleteBackupCommand(String backupPath, String backupRepoType, String backupRepoAddress, String mountOptions) { + super(); + this.backupPath = backupPath; + this.backupRepoType = backupRepoType; + this.backupRepoAddress = backupRepoAddress; + this.mountOptions = mountOptions; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public String getMountOptions() { + return mountOptions == null ? "" : mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/RestoreBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/RestoreBackupCommand.java new file mode 100644 index 00000000000..7228e35147a --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/RestoreBackupCommand.java @@ -0,0 +1,130 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; +import com.cloud.vm.VirtualMachine; + +import java.util.List; + +public class RestoreBackupCommand extends Command { + private String vmName; + private String backupPath; + private String backupRepoType; + private String backupRepoAddress; + private List volumePaths; + private String diskType; + private Boolean vmExists; + private String restoreVolumeUUID; + private VirtualMachine.State vmState; + + protected RestoreBackupCommand() { + super(); + } + + public String getVmName() { + return vmName; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public List getVolumePaths() { + return volumePaths; + } + + public void setVolumePaths(List volumePaths) { + this.volumePaths = volumePaths; + } + + public Boolean isVmExists() { + return 
vmExists; + } + + public void setVmExists(Boolean vmExists) { + this.vmExists = vmExists; + } + + public String getDiskType() { + return diskType; + } + + public void setDiskType(String diskType) { + this.diskType = diskType; + } + + public String getMountOptions() { + return mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public String getRestoreVolumeUUID() { + return restoreVolumeUUID; + } + + public void setRestoreVolumeUUID(String restoreVolumeUUID) { + this.restoreVolumeUUID = restoreVolumeUUID; + } + + public VirtualMachine.State getVmState() { + return vmState; + } + + public void setVmState(VirtualMachine.State vmState) { + this.vmState = vmState; + } + + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + @Override + + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/TakeBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/TakeBackupCommand.java new file mode 100644 index 00000000000..93855ea1721 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/TakeBackupCommand.java @@ -0,0 +1,94 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.LogLevel; + +import java.util.List; + +public class TakeBackupCommand extends Command { + private String vmName; + private String backupPath; + private String backupRepoType; + private String backupRepoAddress; + private List volumePaths; + @LogLevel(LogLevel.Log4jLevel.Off) + private String mountOptions; + + public TakeBackupCommand(String vmName, String backupPath) { + super(); + this.vmName = vmName; + this.backupPath = backupPath; + } + + public String getVmName() { + return vmName; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public String getBackupPath() { + return backupPath; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + public String getBackupRepoType() { + return backupRepoType; + } + + public void setBackupRepoType(String backupRepoType) { + this.backupRepoType = backupRepoType; + } + + public String getBackupRepoAddress() { + return backupRepoAddress; + } + + public void setBackupRepoAddress(String backupRepoAddress) { + this.backupRepoAddress = backupRepoAddress; + } + + public String getMountOptions() { + return mountOptions; + } + + public void setMountOptions(String mountOptions) { + this.mountOptions = mountOptions; + } + + public List getVolumePaths() { + return volumePaths; + } + + public void setVolumePaths(List volumePaths) { + this.volumePaths = volumePaths; + } + + @Override + public boolean executeInSequence() { + return true; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index 2bb67c80ce4..6514038ac62 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ 
b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -39,6 +39,7 @@ public class VolumeObjectTO extends DownloadableObjectTO implements DataTO { private DataStoreTO dataStore; private String name; private Long size; + private Long usableSize; private String path; private Long volumeId; private String vmName; @@ -161,6 +162,10 @@ public class VolumeObjectTO extends DownloadableObjectTO implements DataTO { return size; } + public Long getUsableSize() { + return usableSize; + } + @Override public DataObjectType getObjectType() { return DataObjectType.VOLUME; @@ -178,6 +183,10 @@ public class VolumeObjectTO extends DownloadableObjectTO implements DataTO { this.size = size; } + public void setUsableSize(Long usableSize) { + this.usableSize = usableSize; + } + public void setPath(String path) { this.path = path; } diff --git a/core/src/main/resources/META-INF/cloudstack/allocator/spring-core-allocator-context.xml b/core/src/main/resources/META-INF/cloudstack/allocator/spring-core-allocator-context.xml index a0d1b4cfd43..9f04a7fb618 100644 --- a/core/src/main/resources/META-INF/cloudstack/allocator/spring-core-allocator-context.xml +++ b/core/src/main/resources/META-INF/cloudstack/allocator/spring-core-allocator-context.xml @@ -27,7 +27,7 @@ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd" - > + > diff --git a/core/src/main/resources/META-INF/cloudstack/allocator/spring-core-lifecycle-allocator-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/allocator/spring-core-lifecycle-allocator-context-inheritable.xml index ec3bb63aeb6..6d05f858303 100644 --- a/core/src/main/resources/META-INF/cloudstack/allocator/spring-core-lifecycle-allocator-context-inheritable.xml +++ 
b/core/src/main/resources/META-INF/cloudstack/allocator/spring-core-lifecycle-allocator-context-inheritable.xml @@ -27,7 +27,7 @@ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd" - > + > diff --git a/core/src/main/resources/META-INF/cloudstack/api/spring-core-lifecycle-api-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/api/spring-core-lifecycle-api-context-inheritable.xml index 91a35f18a89..995ed30eb5e 100644 --- a/core/src/main/resources/META-INF/cloudstack/api/spring-core-lifecycle-api-context-inheritable.xml +++ b/core/src/main/resources/META-INF/cloudstack/api/spring-core-lifecycle-api-context-inheritable.xml @@ -62,12 +62,12 @@ - + - + diff --git a/core/src/main/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml b/core/src/main/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml index 49775fe41e1..01c568d7891 100644 --- a/core/src/main/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml +++ b/core/src/main/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml @@ -262,7 +262,7 @@ - + @@ -276,11 +276,11 @@ class="org.apache.cloudstack.spring.lifecycle.registry.ExtensionRegistry"> - + - + @@ -288,10 +288,10 @@ + class="org.apache.cloudstack.spring.lifecycle.registry.ExtensionRegistry"> - + @@ -339,7 +339,7 @@ class="org.apache.cloudstack.spring.lifecycle.registry.ExtensionRegistry"> - @@ -358,4 +358,12 @@ + + + + + + + diff --git a/packaging/centos8/cloud.limits b/core/src/main/resources/META-INF/cloudstack/event/module.properties similarity index 94% rename from packaging/centos8/cloud.limits rename to core/src/main/resources/META-INF/cloudstack/event/module.properties index 7debeb29018..ab1f88e9844 100644 --- a/packaging/centos8/cloud.limits +++ 
b/core/src/main/resources/META-INF/cloudstack/event/module.properties @@ -1,3 +1,4 @@ +# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -14,6 +15,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +# -cloud hard nofile 4096 -cloud soft nofile 4096 +name=event +parent=core diff --git a/core/src/main/resources/META-INF/cloudstack/event/spring-core-lifecycle-event-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/event/spring-core-lifecycle-event-context-inheritable.xml new file mode 100644 index 00000000000..63d11c65bac --- /dev/null +++ b/core/src/main/resources/META-INF/cloudstack/event/spring-core-lifecycle-event-context-inheritable.xml @@ -0,0 +1,31 @@ + + + + + + + + diff --git a/core/src/main/resources/META-INF/cloudstack/kubernetes/spring-core-lifecycle-kubernetes-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/kubernetes/spring-core-lifecycle-kubernetes-context-inheritable.xml index df1a4b5c229..96a9a634bae 100644 --- a/core/src/main/resources/META-INF/cloudstack/kubernetes/spring-core-lifecycle-kubernetes-context-inheritable.xml +++ b/core/src/main/resources/META-INF/cloudstack/kubernetes/spring-core-lifecycle-kubernetes-context-inheritable.xml @@ -25,8 +25,8 @@ > - - + + diff --git a/core/src/main/resources/META-INF/cloudstack/network/spring-core-lifecycle-network-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/network/spring-core-lifecycle-network-context-inheritable.xml index 2240d1f2606..e5c232267ae 100644 --- a/core/src/main/resources/META-INF/cloudstack/network/spring-core-lifecycle-network-context-inheritable.xml +++ b/core/src/main/resources/META-INF/cloudstack/network/spring-core-lifecycle-network-context-inheritable.xml @@ -92,7 +92,7 @@ - + - + - - + + + + + 
diff --git a/core/src/main/resources/META-INF/cloudstack/system/spring-core-system-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/system/spring-core-system-context-inheritable.xml index 4a59e95de35..89442fc12b4 100644 --- a/core/src/main/resources/META-INF/cloudstack/system/spring-core-system-context-inheritable.xml +++ b/core/src/main/resources/META-INF/cloudstack/system/spring-core-system-context-inheritable.xml @@ -27,7 +27,7 @@ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd" - > + > @@ -44,8 +44,8 @@ - - diff --git a/core/src/test/java/com/cloud/agent/api/routing/SetBgpPeersAnswerTest.java b/core/src/test/java/com/cloud/agent/api/routing/SetBgpPeersAnswerTest.java new file mode 100644 index 00000000000..4cd15e4465a --- /dev/null +++ b/core/src/test/java/com/cloud/agent/api/routing/SetBgpPeersAnswerTest.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.routing; + +import org.apache.cloudstack.network.BgpPeerTO; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; + +public class SetBgpPeersAnswerTest { + + @Test + public void testSetBgpPeersAnswer() { + + String good = "good"; + String[] results = new String[1]; + results[0] = good; + + BgpPeerTO bgpPeerTO = Mockito.mock(BgpPeerTO.class); + List bgpPeerTOs = new ArrayList<>(); + bgpPeerTOs.add(bgpPeerTO); + SetBgpPeersCommand command = new SetBgpPeersCommand(bgpPeerTOs); + + SetBgpPeersAnswer answer = new SetBgpPeersAnswer(command, true, results); + + Assert.assertNotNull(answer.getResults()); + Assert.assertEquals(1, answer.getResults().length); + Assert.assertEquals(good, answer.getResults()[0]); + } + + @Test + public void testSetBgpPeersAnswer2() { + SetBgpPeersAnswer answer = new SetBgpPeersAnswer(); + + Assert.assertNull(answer.getResults()); + } +} diff --git a/core/src/test/java/com/cloud/agent/api/routing/SetBgpPeersCommandTest.java b/core/src/test/java/com/cloud/agent/api/routing/SetBgpPeersCommandTest.java new file mode 100644 index 00000000000..882c3b9da30 --- /dev/null +++ b/core/src/test/java/com/cloud/agent/api/routing/SetBgpPeersCommandTest.java @@ -0,0 +1,47 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api.routing; + +import org.apache.cloudstack.network.BgpPeerTO; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; + +public class SetBgpPeersCommandTest { + + @Test + public void testSetBgpPeersCommand1() { + SetBgpPeersCommand command = new SetBgpPeersCommand(); + Assert.assertNull(command.getBpgPeers()); + } + + @Test + public void testSetBgpPeersCommand2() { + BgpPeerTO bgpPeerTO = Mockito.mock(BgpPeerTO.class); + + List bgpPeerTOs = new ArrayList<>(); + bgpPeerTOs.add(bgpPeerTO); + + SetBgpPeersCommand command = new SetBgpPeersCommand(bgpPeerTOs); + Assert.assertNotNull(command.getBpgPeers()); + Assert.assertEquals(1, command.getBpgPeers().length); + Assert.assertEquals(bgpPeerTO, command.getBpgPeers()[0]); + } +} diff --git a/core/src/test/java/com/cloud/agent/resource/virtualnetwork/facade/SetBgpPeersConfigItemTest.java b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/facade/SetBgpPeersConfigItemTest.java new file mode 100644 index 00000000000..5f177c88abf --- /dev/null +++ b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/facade/SetBgpPeersConfigItemTest.java @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.resource.virtualnetwork.facade; + +import com.cloud.agent.api.routing.SetBgpPeersCommand; +import com.cloud.agent.resource.virtualnetwork.ConfigItem; +import com.cloud.agent.resource.virtualnetwork.FileConfigItem; +import com.cloud.agent.resource.virtualnetwork.ScriptConfigItem; +import com.cloud.agent.resource.virtualnetwork.VRScripts; + +import org.apache.cloudstack.network.BgpPeerTO; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; + +public class SetBgpPeersConfigItemTest { + + + @Test + public void testSetBgpPeersConfigItem() { + BgpPeerTO bgpPeerTO = Mockito.mock(BgpPeerTO.class); + List bgpPeerTOs = new ArrayList<>(); + bgpPeerTOs.add(bgpPeerTO); + SetBgpPeersCommand command = new SetBgpPeersCommand(bgpPeerTOs); + + SetBgpPeersConfigItem setBgpPeersConfigItem = new SetBgpPeersConfigItem(); + + List configItems = setBgpPeersConfigItem.generateConfig(command); + Assert.assertNotNull(configItems); + + Assert.assertEquals(2, configItems.size()); + Assert.assertTrue(configItems.get(0) instanceof FileConfigItem); + Assert.assertTrue(configItems.get(1) instanceof ScriptConfigItem); + + Assert.assertEquals(VRScripts.CONFIG_PERSIST_LOCATION, ((FileConfigItem) configItems.get(0)).getFilePath()); + Assert.assertTrue((((FileConfigItem) 
configItems.get(0)).getFileName().startsWith(VRScripts.BGP_PEERS_CONFIG))); + Assert.assertEquals(VRScripts.UPDATE_CONFIG, ((ScriptConfigItem) configItems.get(1)).getScript()); + } +} diff --git a/core/src/test/java/com/cloud/agent/resource/virtualnetwork/model/BgpPeersTest.java b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/model/BgpPeersTest.java new file mode 100644 index 00000000000..eba423e55ed --- /dev/null +++ b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/model/BgpPeersTest.java @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.resource.virtualnetwork.model; + +import org.apache.cloudstack.network.BgpPeerTO; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; + +public class BgpPeersTest { + + @Test + public void testBgpPeers() { + BgpPeerTO bgpPeerTO = Mockito.mock(BgpPeerTO.class); + List bgpPeerTOs = new ArrayList<>(); + bgpPeerTOs.add(bgpPeerTO); + + BgpPeers bgpPeers = new BgpPeers(bgpPeerTOs); + Assert.assertEquals(ConfigBase.BGP_PEERS, bgpPeers.getType()); + Assert.assertNotNull(bgpPeers.getPeers()); + Assert.assertEquals(1, bgpPeers.getPeers().size()); + Assert.assertEquals(bgpPeerTO, bgpPeers.getPeers().get(0)); + } + + @Test + public void testBgpPeers2() { + BgpPeers bgpPeers = new BgpPeers(); + Assert.assertEquals(ConfigBase.BGP_PEERS, bgpPeers.getType()); + + BgpPeerTO bgpPeerTO = Mockito.mock(BgpPeerTO.class); + List bgpPeerTOs = new ArrayList<>(); + bgpPeerTOs.add(bgpPeerTO); + bgpPeers.setPeers(bgpPeerTOs); + + Assert.assertNotNull(bgpPeers.getPeers()); + Assert.assertEquals(1, bgpPeers.getPeers().size()); + Assert.assertEquals(bgpPeerTO, bgpPeers.getPeers().get(0)); + } +} diff --git a/core/src/test/java/com/cloud/serializer/GsonHelperTest.java b/core/src/test/java/com/cloud/serializer/GsonHelperTest.java new file mode 100644 index 00000000000..e8b0b373060 --- /dev/null +++ b/core/src/test/java/com/cloud/serializer/GsonHelperTest.java @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.serializer; + +import com.cloud.agent.api.to.NfsTO; +import com.cloud.storage.DataStoreRole; +import com.google.gson.Gson; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +/** + * Test cases to verify working order of GsonHelper.java + * with regards to a concrete implementation of the DataStoreTO + * interface + */ +public class GsonHelperTest { + + private Gson gson; + private Gson gsonLogger; + private NfsTO nfsTO; + + @Before + public void setUp() { + gson = GsonHelper.getGson(); + gsonLogger = GsonHelper.getGsonLogger(); + nfsTO = new NfsTO("http://example.com", DataStoreRole.Primary); + } + + @Test + public void testGsonSerialization() { + String json = gson.toJson(nfsTO); + assertNotNull(json); + assertTrue(json.contains("\"_url\":\"http://example.com\"")); + assertTrue(json.contains("\"_role\":\"Primary\"")); + } + + @Test + public void testGsonDeserialization() { + String json = "{\"_url\":\"http://example.com\",\"_role\":\"Primary\"}"; + NfsTO deserializedNfsTO = gson.fromJson(json, NfsTO.class); + assertNotNull(deserializedNfsTO); + assertEquals("http://example.com", deserializedNfsTO.getUrl()); + assertEquals(DataStoreRole.Primary, deserializedNfsTO.getRole()); + } + + @Test + public void testGsonLoggerSerialization() { + String json = gsonLogger.toJson(nfsTO); + assertNotNull(json); + assertTrue(json.contains("\"_url\":\"http://example.com\"")); + 
assertTrue(json.contains("\"_role\":\"Primary\"")); + } + + @Test + public void testGsonLoggerDeserialization() { + String json ="{\"_url\":\"http://example.com\",\"_role\":\"Primary\"}"; + NfsTO deserializedNfsTO = gsonLogger.fromJson(json, NfsTO.class); + assertNotNull(deserializedNfsTO); + assertEquals("http://example.com", deserializedNfsTO.getUrl()); + assertEquals(DataStoreRole.Primary, deserializedNfsTO.getRole()); + } +} diff --git a/core/src/test/java/com/cloud/storage/template/OVAProcessorTest.java b/core/src/test/java/com/cloud/storage/template/OVAProcessorTest.java index 8ab54644718..8674a8df286 100644 --- a/core/src/test/java/com/cloud/storage/template/OVAProcessorTest.java +++ b/core/src/test/java/com/cloud/storage/template/OVAProcessorTest.java @@ -131,5 +131,25 @@ public class OVAProcessorTest { Assert.assertEquals(virtualSize, processor.getVirtualSize(mockFile)); Mockito.verify(mockFile, Mockito.times(0)).length(); } + @Test + public void testProcessWithLargeFileSize() throws Exception { + String templatePath = "/tmp"; + String templateName = "large_template"; + long virtualSize = 10_000_000_000L; + long actualSize = 5_000_000_000L; + Mockito.when(mockStorageLayer.exists(Mockito.anyString())).thenReturn(true); + Mockito.when(mockStorageLayer.getSize(Mockito.anyString())).thenReturn(actualSize); + Mockito.doReturn(virtualSize).when(processor).getTemplateVirtualSize(Mockito.anyString(), Mockito.anyString()); + + try (MockedConstruction + @@ -87,27 +96,29 @@ execute - - def csVersion = pom.properties['cs.version'] - def patch = pom.properties['patch.version'] - def templateList = [] - templateList.add("systemvmtemplate-${csVersion}.${patch}-kvm") - templateList.add("systemvmtemplate-${csVersion}.${patch}-vmware") - templateList.add("systemvmtemplate-${csVersion}.${patch}-xen") - templateList.add("systemvmtemplate-${csVersion}.${patch}-ovm") - templateList.add("systemvmtemplate-${csVersion}.${patch}-hyperv") - File file = new 
File("./engine/schema/dist/systemvm-templates/md5sum.txt") - def lines = file.readLines() - for (template in templateList) { - def data = lines.findAll { it.contains(template) } - if (data != null) { - if (data.size() > 0) { - def hypervisor = template.tokenize('-')[-1] - pom.properties["$hypervisor" + ".checksum"] = data[0].tokenize(' ')[0] + + + @@ -135,7 +146,7 @@ org.codehaus.mojo exec-maven-plugin - 1.2.1 + ${cs.exec-maven-plugin.version} systemvm-template-metadata @@ -183,7 +194,7 @@ true - ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-kvm.qcow2.bz2 + ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-kvm.qcow2.bz2 ${basedir}/dist/systemvm-templates/ ${kvm.checksum} @@ -219,7 +230,7 @@ true - ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-vmware.ova + ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-vmware.ova ${basedir}/dist/systemvm-templates/ ${vmware.checksum} @@ -255,7 +266,7 @@ true - ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-xen.vhd.bz2 + ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-xen.vhd.bz2 ${basedir}/dist/systemvm-templates/ ${xen.checksum} @@ -291,7 +302,7 @@ true - ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-ovm.raw.bz2 + ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-ovm.raw.bz2 ${basedir}/dist/systemvm-templates/ ${ovm.checksum} @@ -327,7 +338,7 @@ true - ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-hyperv.vhd.zip + ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-hyperv.vhd.zip 
${basedir}/dist/systemvm-templates/ ${hyperv.checksum} diff --git a/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java b/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java index 132fd3fe5a2..cd62935f17e 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java +++ b/engine/schema/src/main/java/com/cloud/capacity/CapacityVO.java @@ -135,8 +135,8 @@ public class CapacityVO implements Capacity { return podId; } - public void setPodId(long podId) { - this.podId = new Long(podId); + public void setPodId(Long podId) { + this.podId = podId; } @Override @@ -144,8 +144,8 @@ public class CapacityVO implements Capacity { return clusterId; } - public void setClusterId(long clusterId) { - this.clusterId = new Long(clusterId); + public void setClusterId(Long clusterId) { + this.clusterId = clusterId; } @Override diff --git a/engine/schema/src/main/java/com/cloud/dc/ASNumberRangeVO.java b/engine/schema/src/main/java/com/cloud/dc/ASNumberRangeVO.java new file mode 100644 index 00000000000..3790213b3ad --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/ASNumberRangeVO.java @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc; + +import com.cloud.bgp.ASNumberRange; +import com.cloud.utils.db.GenericDao; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.UUID; + +@Entity +@Table(name = "as_number_range") +public class ASNumberRangeVO implements ASNumberRange { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "data_center_id") + private long dataCenterId; + + @Column(name = "start_as_number") + private long startASNumber; + + @Column(name = "end_as_number") + private long endASNumber; + + @Column(name = GenericDao.REMOVED_COLUMN) + private Date removed; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + public ASNumberRangeVO() { + this.uuid = UUID.randomUUID().toString(); + this.created = GregorianCalendar.getInstance().getTime(); + } + + public ASNumberRangeVO(long dataCenterId, long startASNumber, long endASNumber) { + this(); + this.dataCenterId = dataCenterId; + this.startASNumber = startASNumber; + this.endASNumber = endASNumber; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getDataCenterId() { + return dataCenterId; + } + + @Override + public long getStartASNumber() { + return startASNumber; + } + + @Override + public long getEndASNumber() { + return endASNumber; + } + + public Date getRemoved() { + return removed; + } + + @Override + public Date getCreated() { + return created; + } +} diff --git a/engine/schema/src/main/java/com/cloud/dc/ASNumberVO.java b/engine/schema/src/main/java/com/cloud/dc/ASNumberVO.java new file mode 100644 index 00000000000..529d1cfb5fe --- 
/dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/ASNumberVO.java @@ -0,0 +1,178 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.dc; + +import com.cloud.bgp.ASNumber; +import com.cloud.utils.db.GenericDao; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; +import java.util.Date; +import java.util.UUID; + +@Entity +@Table(name = "as_number") +public class ASNumberVO implements ASNumber { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "account_id") + private Long accountId; + + @Column(name = "domain_id") + private Long domainId; + + @Column(name = "as_number") + private long asNumber; + + @Column(name = "as_number_range_id") + private long asNumberRangeId; + + @Column(name = "data_center_id") + private long dataCenterId; + + @Column(name = "allocated") + @Temporal(value = TemporalType.TIMESTAMP) + private Date allocatedTime; + 
+ @Column(name = "is_allocated") + private boolean allocated; + + @Column(name = "network_id") + private Long networkId; + + @Column(name = "vpc_id") + private Long vpcId; + + @Column(name = GenericDao.REMOVED_COLUMN) + private Date removed; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + public ASNumberVO() { + this.uuid = UUID.randomUUID().toString(); + this.created = new Date(); + } + + public ASNumberVO(long asNumber, long asNumberRangeId, long dataCenterId) { + this(); + this.asNumber = asNumber; + this.asNumberRangeId = asNumberRangeId; + this.dataCenterId = dataCenterId; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + public void setAccountId(Long accountId) { + this.accountId = accountId; + } + + @Override + public Long getAccountId() { + return accountId; + } + + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + + @Override + public Long getDomainId() { + return domainId; + } + + @Override + public long getAsNumber() { + return asNumber; + } + + @Override + public long getAsNumberRangeId() { + return asNumberRangeId; + } + + @Override + public long getDataCenterId() { + return dataCenterId; + } + + public void setAllocatedTime(Date date) { + this.allocatedTime = date; + } + + @Override + public Date getAllocatedTime() { + return allocatedTime; + } + + public void setAllocated(boolean allocated) { + this.allocated = allocated; + } + + @Override + public boolean isAllocated() { + return allocated; + } + + public void setNetworkId(Long networkId) { + this.networkId = networkId; + } + + @Override + public Long getNetworkId() { + return networkId; + } + + @Override + public Date getRemoved() { + return removed; + } + + @Override + public Date getCreated() { + return created; + } + + public Long getVpcId() { + return vpcId; + } + + public void setVpcId(Long vpcId) { + this.vpcId = vpcId; + } +} diff --git 
a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java index c2058ad5644..0e40f8475c1 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java @@ -20,7 +20,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; @@ -136,8 +135,8 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase key) { - ClusterDetailsVO vo = findDetail(id, key.key()); + public String getConfigValue(long id, String key) { + ClusterDetailsVO vo = findDetail(id, key); return vo == null ? null : vo.getValue(); } diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java b/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java index 90591690eb0..434901ef5b3 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterVO.java @@ -16,12 +16,14 @@ // under the License. 
package com.cloud.dc; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Cluster; import com.cloud.org.Grouping; import com.cloud.org.Managed.ManagedState; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; import javax.persistence.Column; @@ -69,6 +71,10 @@ public class ClusterVO implements Cluster { @Enumerated(value = EnumType.STRING) AllocationState allocationState; + @Column(name = "arch") + @Convert(converter = CPUArchConverter.class) + private String arch; + @Column(name = "managed_state") @Enumerated(value = EnumType.STRING) ManagedState managedState; @@ -200,6 +206,15 @@ public class ClusterVO implements Cluster { return PartitionType.Cluster; } + @Override + public CPU.CPUArch getArch() { + return CPU.CPUArch.fromType(arch); + } + + public void setArch(String arch) { + this.arch = arch; + } + @Override public String toString() { return String.format("Cluster {id: \"%s\", name: \"%s\", uuid: \"%s\"}", id, name, uuid); diff --git a/engine/schema/src/main/java/com/cloud/dc/DataCenterIpAddressVO.java b/engine/schema/src/main/java/com/cloud/dc/DataCenterIpAddressVO.java index 3d68cc3d9a8..874b05673eb 100644 --- a/engine/schema/src/main/java/com/cloud/dc/DataCenterIpAddressVO.java +++ b/engine/schema/src/main/java/com/cloud/dc/DataCenterIpAddressVO.java @@ -55,7 +55,7 @@ public class DataCenterIpAddressVO implements InternalIdentity { String reservationId; @Column(name = "nic_id") - private Long instanceId; + private Long nicId; @Column(name = "mac_address") long macAddress; @@ -88,12 +88,12 @@ public class DataCenterIpAddressVO implements InternalIdentity { return id; } - public Long getInstanceId() { - return instanceId; + public Long getNicId() { + return nicId; } - public void setInstanceId(Long instanceId) { - this.instanceId = instanceId; + public void setNicId(Long 
nicId) { + this.nicId = nicId; } public long getPodId() { diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberDao.java new file mode 100644 index 00000000000..192f6bbaf31 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberDao.java @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import com.cloud.dc.ASNumberVO; +import com.cloud.user.Account; +import com.cloud.utils.Pair; +import com.cloud.utils.db.GenericDao; + +import java.util.List; + +public interface ASNumberDao extends GenericDao { + + Pair, Integer> searchAndCountByZoneOrRangeOrAllocated(Long zoneId, Long asnRangeId, Integer asNumber, Long networkId, Long vpcId, + Boolean allocated, Long accountId, Long domainId, String keyword, Account caller, + Long startIndex, Long pageSizeVal); + ASNumberVO findByAsNumber(Long asNumber); + + ASNumberVO findOneByAllocationStateAndZone(long zoneId, boolean allocated); + + List listAllocatedByASRange(Long asRangeId); + + ASNumberVO findByZoneAndNetworkId(long zoneId, long networkId); + ASNumberVO findByZoneAndVpcId(long zoneId, long vpcId); + + int removeASRangeNumbers(long rangeId); +} diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberDaoImpl.java new file mode 100644 index 00000000000..1d2adf4d424 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberDaoImpl.java @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import com.cloud.dc.ASNumberVO; +import com.cloud.user.Account; +import com.cloud.utils.Pair; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +import java.util.Arrays; +import java.util.List; + +public class ASNumberDaoImpl extends GenericDaoBase implements ASNumberDao { + + private final SearchBuilder asNumberSearch; + + public ASNumberDaoImpl() { + asNumberSearch = createSearchBuilder(); + asNumberSearch.and("zoneId", asNumberSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + asNumberSearch.and("rangeId", asNumberSearch.entity().getAsNumberRangeId(), SearchCriteria.Op.EQ); + asNumberSearch.and("isAllocated", asNumberSearch.entity().isAllocated(), SearchCriteria.Op.EQ); + asNumberSearch.and("asNumber", asNumberSearch.entity().getAsNumber(), SearchCriteria.Op.EQ); + asNumberSearch.and("networkId", asNumberSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); + asNumberSearch.and("vpcId", asNumberSearch.entity().getVpcId(), SearchCriteria.Op.EQ); + asNumberSearch.and("accountId", asNumberSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + asNumberSearch.and("domainId", asNumberSearch.entity().getDomainId(), SearchCriteria.Op.EQ); + asNumberSearch.done(); + } + + @Override + public Pair, Integer> searchAndCountByZoneOrRangeOrAllocated(Long zoneId, Long asnRangeId, + Integer asNumber, Long networkId, Long vpcId, + Boolean allocated, + Long accountId, Long domainId, + String keyword, Account caller, + Long startIndex, Long pageSizeVal) { + SearchCriteria sc = asNumberSearch.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (asnRangeId != null) { + sc.setParameters("rangeId", asnRangeId); + } + if (networkId != null) { + sc.setParameters("networkId", networkId); + } + if (vpcId != null) { + sc.setParameters("vpcId", vpcId); + } + if (allocated != null) { + 
sc.setParameters("isAllocated", allocated); + } + if (asNumber != null) { + sc.setParameters("asNumber", asNumber); + } + if (accountId != null) { + sc.setParameters("accountId", accountId); + } + if (domainId != null) { + sc.setParameters("domainId", domainId); + } + if (keyword != null) { + sc.addAnd("asNumber", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + } + if (Arrays.asList(Account.Type.DOMAIN_ADMIN, Account.Type.RESOURCE_DOMAIN_ADMIN).contains(caller.getType())) { + SearchCriteria scc = asNumberSearch.create(); + scc.addOr("domainId", SearchCriteria.Op.NULL); + scc.addOr("domainId", SearchCriteria.Op.EQ, caller.getDomainId()); + sc.addAnd("domainId", SearchCriteria.Op.SC, scc); + } else if (Arrays.asList(Account.Type.NORMAL, Account.Type.PROJECT).contains(caller.getType())) { + SearchCriteria scc = asNumberSearch.create(); + scc.addOr("domainId", SearchCriteria.Op.NULL); + scc.addOr("accountId", SearchCriteria.Op.EQ, caller.getAccountId()); + sc.addAnd("domainId", SearchCriteria.Op.SC, scc); + } + Filter searchFilter = new Filter(ASNumberVO.class, "id", true, startIndex, pageSizeVal); + return searchAndCount(sc, searchFilter); + } + + @Override + public ASNumberVO findByAsNumber(Long asNumber) { + SearchCriteria sc = asNumberSearch.create(); + sc.setParameters("asNumber", asNumber); + return findOneBy(sc); + } + + @Override + public ASNumberVO findOneByAllocationStateAndZone(long zoneId, boolean allocated) { + SearchCriteria sc = asNumberSearch.create(); + sc.setParameters("zoneId", zoneId); + sc.setParameters("isAllocated", allocated); + return findOneBy(sc); + } + + @Override + public List listAllocatedByASRange(Long asRangeId) { + SearchCriteria sc = asNumberSearch.create(); + sc.setParameters("rangeId", asRangeId); + sc.setParameters("isAllocated", true); + return listBy(sc); + } + + public ASNumberVO findByZoneAndNetworkId(long zoneId, long networkId) { + SearchCriteria sc = asNumberSearch.create(); + sc.setParameters("zoneId", zoneId); + 
sc.setParameters("networkId", networkId); + return findOneBy(sc); + } + + @Override + public ASNumberVO findByZoneAndVpcId(long zoneId, long vpcId) { + SearchCriteria sc = asNumberSearch.create(); + sc.setParameters("zoneId", zoneId); + sc.setParameters("vpcId", vpcId); + return findOneBy(sc); + } + + @Override + public int removeASRangeNumbers(long rangeId) { + SearchCriteria sc = asNumberSearch.create(); + sc.setParameters("rangeId", rangeId); + return remove(sc); + } +} diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberRangeDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberRangeDao.java new file mode 100644 index 00000000000..3309a6f5fe5 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberRangeDao.java @@ -0,0 +1,27 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import com.cloud.dc.ASNumberRangeVO; +import com.cloud.utils.db.GenericDao; + +import java.util.List; + +public interface ASNumberRangeDao extends GenericDao { + + List listByZoneId(long zoneId); +} diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberRangeDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberRangeDaoImpl.java new file mode 100644 index 00000000000..4a4170685dc --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ASNumberRangeDaoImpl.java @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import com.cloud.dc.ASNumberRangeVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +import java.util.List; + +public class ASNumberRangeDaoImpl extends GenericDaoBase implements ASNumberRangeDao { + + private final SearchBuilder searchBuilder; + + public ASNumberRangeDaoImpl() { + searchBuilder = createSearchBuilder(); + searchBuilder.and("zoneId", searchBuilder.entity().getDataCenterId(), SearchCriteria.Op.EQ); + searchBuilder.done(); + } + + @Override + public List listByZoneId(long zoneId) { + SearchCriteria sc = searchBuilder.create(); + sc.setParameters("zoneId", zoneId); + return listBy(sc); + } +} diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index ab9c5cab8c4..6ecfdaeb058 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.dc.dao; +import com.cloud.cpu.CPU; import com.cloud.dc.ClusterVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; @@ -50,4 +51,8 @@ public interface ClusterDao extends GenericDao { List listAllClusters(Long zoneId); boolean getSupportsResigning(long clusterId); + + List getClustersArchsByZone(long zoneId); + + List listClustersByArchAndZoneId(long zoneId, CPU.CPUArch arch); } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java index 4d9bedba966..9a56f0f2d94 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.dc.dao; +import com.cloud.cpu.CPU; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; @@ -43,6 +44,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; @Component public class ClusterDaoImpl extends GenericDaoBase implements ClusterDao { @@ -54,6 +56,8 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder ZoneHyTypeSearch; protected final SearchBuilder ZoneClusterSearch; protected final SearchBuilder ClusterSearch; + protected final SearchBuilder ClusterDistinctArchSearch; + protected final SearchBuilder ClusterArchSearch; protected GenericSearchBuilder ClusterIdSearch; @@ -104,6 +108,16 @@ public class ClusterDaoImpl extends GenericDaoBase implements C ClusterSearch = createSearchBuilder(); ClusterSearch.select(null, Func.DISTINCT, ClusterSearch.entity().getHypervisorType()); ClusterIdSearch.done(); + + ClusterDistinctArchSearch = createSearchBuilder(); + ClusterDistinctArchSearch.and("dataCenterId", ClusterDistinctArchSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ClusterDistinctArchSearch.select(null, Func.DISTINCT, ClusterDistinctArchSearch.entity().getArch()); + ClusterDistinctArchSearch.done(); + + ClusterArchSearch = createSearchBuilder(); + ClusterArchSearch.and("dataCenterId", ClusterArchSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ClusterArchSearch.and("arch", ClusterArchSearch.entity().getArch(), SearchCriteria.Op.EQ); + ClusterArchSearch.done(); } @Override @@ -301,4 +315,20 @@ public class ClusterDaoImpl extends GenericDaoBase implements C return false; } + + @Override + public List getClustersArchsByZone(long zoneId) { + SearchCriteria sc = ClusterDistinctArchSearch.create(); + sc.setParameters("dataCenterId", zoneId); + List clusters = listBy(sc); + return clusters.stream().map(ClusterVO::getArch).collect(Collectors.toList()); 
+ } + + @Override + public List listClustersByArchAndZoneId(long zoneId, CPU.CPUArch arch) { + SearchCriteria sc = ClusterArchSearch.create(); + sc.setParameters("dataCenterId", zoneId); + sc.setParameters("arch", arch); + return listBy(sc); + } } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java index 2776b09c2a1..7719e5adfc7 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java @@ -164,8 +164,8 @@ public class DataCenterDaoImpl extends GenericDaoBase implem } @Override - public void releasePrivateIpAddress(String ipAddress, long dcId, Long instanceId) { - _ipAllocDao.releaseIpAddress(ipAddress, dcId, instanceId); + public void releasePrivateIpAddress(String ipAddress, long dcId, Long nicId) { + _ipAllocDao.releaseIpAddress(ipAddress, dcId, nicId); } @Override @@ -179,8 +179,8 @@ public class DataCenterDaoImpl extends GenericDaoBase implem } @Override - public void releaseLinkLocalIpAddress(String ipAddress, long dcId, Long instanceId) { - _linkLocalIpAllocDao.releaseIpAddress(ipAddress, dcId, instanceId); + public void releaseLinkLocalIpAddress(String ipAddress, long dcId, Long nicId) { + _linkLocalIpAllocDao.releaseIpAddress(ipAddress, dcId, nicId); } @Override @@ -226,9 +226,9 @@ public class DataCenterDaoImpl extends GenericDaoBase implem } @Override - public PrivateAllocationData allocatePrivateIpAddress(long dcId, long podId, long instanceId, String reservationId, boolean forSystemVms) { - _ipAllocDao.releaseIpAddress(instanceId); - DataCenterIpAddressVO vo = _ipAllocDao.takeIpAddress(dcId, podId, instanceId, reservationId, forSystemVms); + public PrivateAllocationData allocatePrivateIpAddress(long dcId, long podId, long nicId, String reservationId, boolean forSystemVms) { + _ipAllocDao.releaseIpAddress(nicId); + DataCenterIpAddressVO vo = 
_ipAllocDao.takeIpAddress(dcId, podId, nicId, reservationId, forSystemVms); if (vo == null) { return null; } @@ -242,8 +242,8 @@ public class DataCenterDaoImpl extends GenericDaoBase implem } @Override - public String allocateLinkLocalIpAddress(long dcId, long podId, long instanceId, String reservationId) { - DataCenterLinkLocalIpAddressVO vo = _linkLocalIpAllocDao.takeIpAddress(dcId, podId, instanceId, reservationId); + public String allocateLinkLocalIpAddress(long dcId, long podId, long nicId, String reservationId) { + DataCenterLinkLocalIpAddressVO vo = _linkLocalIpAllocDao.takeIpAddress(dcId, podId, nicId, reservationId); if (vo == null) { return null; } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java index e36c8ebd6c7..bb03a96d02e 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java @@ -44,8 +44,8 @@ public class DataCenterDetailsDaoImpl extends ResourceDetailsDaoBase key) { - ResourceDetail vo = findDetail(id, key.key()); + public String getConfigValue(long id, String key) { + ResourceDetail vo = findDetail(id, key); return vo == null ? 
null : vo.getValue(); } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java index c23137095e6..48b9c83c64c 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java @@ -51,7 +51,7 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); sc.setParameters("pod", podId); sc.setParameters("taken", (Date)null); @@ -71,7 +71,7 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); sc.setParameters("ip", ipAddress); sc.setParameters("dc", dcId); - sc.setParameters("instance", instanceId); + sc.setParameters("nic", nicId); DataCenterIpAddressVO vo = createForUpdate(); vo.setTakenAt(null); - vo.setInstanceId(null); + vo.setNicId(null); vo.setReservationId(null); update(vo, sc); } @@ -186,15 +186,15 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase { void deleteDetails(long domainId); void update(long domainId, Map details); + + String getActualValue(DomainDetailVO domainDetailVO); } diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java index dad3fe9ad1e..b9721a2e58c 100644 --- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDetailsDaoImpl.java @@ -22,18 +22,20 @@ import java.util.Map; import javax.inject.Inject; +import org.apache.cloudstack.framework.config.ConfigKey.Scope; +import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; + import com.cloud.domain.DomainDetailVO; import com.cloud.domain.DomainVO; 
+import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.ConfigKey.Scope; -import org.apache.cloudstack.framework.config.ScopedConfigStorage; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; public class DomainDetailsDaoImpl extends GenericDaoBase implements DomainDetailsDao, ScopedConfigStorage { protected final SearchBuilder domainSearch; @@ -106,17 +108,17 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i } @Override - public String getConfigValue(long id, ConfigKey key) { + public String getConfigValue(long id, String key) { DomainDetailVO vo = null; String enableDomainSettingsForChildDomain = _configDao.getValue("enable.domain.settings.for.child.domain"); if (!Boolean.parseBoolean(enableDomainSettingsForChildDomain)) { - vo = findDetail(id, key.key()); - return vo == null ? null : vo.getValue(); + vo = findDetail(id, key); + return vo == null ? null : getActualValue(vo); } DomainVO domain = _domainDao.findById(id); // if value is not configured in domain then check its parent domain till ROOT while (domain != null) { - vo = findDetail(domain.getId(), key.key()); + vo = findDetail(domain.getId(), key); if (vo != null) { break; } else if (domain.getParent() != null) { @@ -125,6 +127,15 @@ public class DomainDetailsDaoImpl extends GenericDaoBase i break; } } - return vo == null ? null : vo.getValue(); + return vo == null ? 
null : getActualValue(vo); + } + + @Override + public String getActualValue(DomainDetailVO domainDetailVO) { + ConfigurationVO configurationVO = _configDao.findByName(domainDetailVO.getName()); + if (configurationVO != null && configurationVO.isEncrypted()) { + return DBEncryptionUtil.decrypt(domainDetailVO.getValue()); + } + return domainDetailVO.getValue(); } } diff --git a/engine/schema/src/main/java/com/cloud/host/HostVO.java b/engine/schema/src/main/java/com/cloud/host/HostVO.java index 3e64d20d0e2..b5b634a73a7 100644 --- a/engine/schema/src/main/java/com/cloud/host/HostVO.java +++ b/engine/schema/src/main/java/com/cloud/host/HostVO.java @@ -16,13 +16,13 @@ // under the License. package com.cloud.host; -import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import javax.persistence.Column; @@ -42,9 +42,12 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import javax.persistence.Transient; +import com.cloud.cpu.CPU; +import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang3.StringUtils; @@ -152,6 +155,10 @@ public class HostVO implements Host { @Column(name = "hypervisor_version") private String hypervisorVersion; + @Column(name = "arch") + @Convert(converter = CPUArchConverter.class) + private CPU.CPUArch arch; + @Column(name = "update_count", updatable = true, nullable = false) protected long updated; // This field should be updated everytime the state is updated. 
There's no set method in the vo object because it is done with in the dao code. @@ -737,6 +744,15 @@ public class HostVO implements Host { return resourceState; } + @Override + public CPU.CPUArch getArch() { + return arch; + } + + public void setArch(CPU.CPUArch arch) { + this.arch = arch; + } + public void setResourceState(ResourceState state) { resourceState = state; } @@ -768,27 +784,48 @@ public class HostVO implements Host { this.uuid = uuid; } - public boolean checkHostServiceOfferingAndTemplateTags(ServiceOffering serviceOffering, VirtualMachineTemplate template) { - if (serviceOffering == null || template == null) { - return false; - } + private Set getHostServiceOfferingAndTemplateStrictTags(ServiceOffering serviceOffering, VirtualMachineTemplate template, Set strictHostTags) { if (StringUtils.isEmpty(serviceOffering.getHostTag()) && StringUtils.isEmpty(template.getTemplateTag())) { - return true; + return new HashSet<>(); } - if (getHostTags() == null) { - return false; - } - HashSet hostTagsSet = new HashSet<>(getHostTags()); - List tags = new ArrayList<>(); + List hostTagsList = getHostTags(); + HashSet hostTagsSet = CollectionUtils.isNotEmpty(hostTagsList) ? 
new HashSet<>(hostTagsList) : new HashSet<>(); + HashSet tags = new HashSet<>(); if (StringUtils.isNotEmpty(serviceOffering.getHostTag())) { tags.addAll(Arrays.asList(serviceOffering.getHostTag().split(","))); } - if (StringUtils.isNotEmpty(template.getTemplateTag()) && !tags.contains(template.getTemplateTag())) { + if (StringUtils.isNotEmpty(template.getTemplateTag())) { tags.add(template.getTemplateTag()); } + tags.removeIf(tag -> !strictHostTags.contains(tag)); + tags.removeAll(hostTagsSet); + return tags; + } + + public boolean checkHostServiceOfferingAndTemplateTags(ServiceOffering serviceOffering, VirtualMachineTemplate template, Set strictHostTags) { + if (serviceOffering == null || template == null) { + return false; + } + Set tags = getHostServiceOfferingAndTemplateStrictTags(serviceOffering, template, strictHostTags); + if (tags.isEmpty()) { + return true; + } + List hostTagsList = getHostTags(); + HashSet hostTagsSet = CollectionUtils.isNotEmpty(hostTagsList) ? new HashSet<>(hostTagsList) : new HashSet<>(); return hostTagsSet.containsAll(tags); } + public Set getHostServiceOfferingAndTemplateMissingTags(ServiceOffering serviceOffering, VirtualMachineTemplate template, Set strictHostTags) { + Set tags = getHostServiceOfferingAndTemplateStrictTags(serviceOffering, template, strictHostTags); + if (tags.isEmpty()) { + return new HashSet<>(); + } + List hostTagsList = getHostTags(); + HashSet hostTagsSet = CollectionUtils.isNotEmpty(hostTagsList) ? 
new HashSet<>(hostTagsList) : new HashSet<>(); + tags.removeAll(hostTagsSet); + return tags; + } + public boolean checkHostServiceOfferingTags(ServiceOffering serviceOffering) { if (serviceOffering == null) { return false; diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index ca180e2323f..a2df6db44e5 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -141,6 +141,8 @@ public interface HostDao extends GenericDao, StateDao listByHostCapability(Host.Type type, Long clusterId, Long podId, long dcId, String hostCapabilty); + List listByClusterHypervisorTypeAndHostCapability(Long clusterId, HypervisorType hypervisorType, String hostCapabilty); + List listByClusterAndHypervisorType(long clusterId, HypervisorType hypervisorType); HostVO findByName(String name); @@ -167,4 +169,6 @@ public interface HostDao extends GenericDao, StateDao findHostsWithTagRuleThatMatchComputeOferringTags(String computeOfferingTags); List findClustersThatMatchHostTagRule(String computeOfferingTags); + + List listSsvmHostsWithPendingMigrateJobsOrderedByJobCount(); } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 5faa877b458..63950294654 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -341,6 +341,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao ClusterHypervisorSearch.and("hypervisor", ClusterHypervisorSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); ClusterHypervisorSearch.and("type", ClusterHypervisorSearch.entity().getType(), SearchCriteria.Op.EQ); ClusterHypervisorSearch.and("status", ClusterHypervisorSearch.entity().getStatus(), SearchCriteria.Op.EQ); + 
ClusterHypervisorSearch.and("resourceState", ClusterHypervisorSearch.entity().getResourceState(), SearchCriteria.Op.EQ); ClusterHypervisorSearch.done(); UnmanagedDirectConnectSearch = createSearchBuilder(); @@ -1370,6 +1371,31 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return new ArrayList<>(result); } + @Override + public List listSsvmHostsWithPendingMigrateJobsOrderedByJobCount() { + String query = "SELECT cel.host_id, COUNT(*) " + + "FROM cmd_exec_log cel " + + "JOIN host h ON cel.host_id = h.id " + + "WHERE h.removed IS NULL " + + "GROUP BY cel.host_id " + + "ORDER BY 2"; + + TransactionLegacy txn = TransactionLegacy.currentTxn(); + List result = new ArrayList<>(); + + PreparedStatement pstmt; + try { + pstmt = txn.prepareAutoCloseStatement(query); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add((long) rs.getInt(1)); + } + } catch (SQLException e) { + logger.warn("SQLException caught while listing SSVMs with least migrate jobs.", e); + } + return result; + } + private String getHostIdsByComputeTags(List offeringTags){ List questionMarks = new ArrayList(); offeringTags.forEach((tag) -> { questionMarks.add("?"); }); @@ -1506,12 +1532,42 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } + @Override + public List listByClusterHypervisorTypeAndHostCapability(Long clusterId, HypervisorType hypervisorType, String hostCapabilty) { + SearchBuilder hostCapabilitySearch = _detailsDao.createSearchBuilder(); + DetailVO tagEntity = hostCapabilitySearch.entity(); + hostCapabilitySearch.and("capability", tagEntity.getName(), SearchCriteria.Op.EQ); + hostCapabilitySearch.and("value", tagEntity.getValue(), SearchCriteria.Op.EQ); + + SearchBuilder hostSearch = createSearchBuilder(); + HostVO entity = hostSearch.entity(); + hostSearch.and("clusterId", entity.getClusterId(), SearchCriteria.Op.EQ); + hostSearch.and("hypervisor", entity.getHypervisorType(), SearchCriteria.Op.EQ); + 
hostSearch.and("type", entity.getType(), SearchCriteria.Op.EQ); + hostSearch.and("status", entity.getStatus(), SearchCriteria.Op.EQ); + hostSearch.and("resourceState", entity.getResourceState(), SearchCriteria.Op.EQ); + hostSearch.join("hostCapabilitySearch", hostCapabilitySearch, entity.getId(), tagEntity.getHostId(), JoinBuilder.JoinType.INNER); + + SearchCriteria sc = hostSearch.create(); + sc.setJoinParameters("hostCapabilitySearch", "value", Boolean.toString(true)); + sc.setJoinParameters("hostCapabilitySearch", "capability", hostCapabilty); + + sc.setParameters("clusterId", clusterId); + sc.setParameters("hypervisor", hypervisorType); + sc.setParameters("type", Type.Routing); + sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); + return listBy(sc); + } + + @Override public List listByClusterAndHypervisorType(long clusterId, HypervisorType hypervisorType) { SearchCriteria sc = ClusterHypervisorSearch.create(); sc.setParameters("clusterId", clusterId); sc.setParameters("hypervisor", hypervisorType); sc.setParameters("type", Type.Routing); sc.setParameters("status", Status.Up); + sc.setParameters("resourceState", ResourceState.Enabled); return listBy(sc); } diff --git a/engine/schema/src/main/java/com/cloud/hypervisor/HypervisorCapabilitiesVO.java b/engine/schema/src/main/java/com/cloud/hypervisor/HypervisorCapabilitiesVO.java index 4455c7491dd..a3b03280fdf 100644 --- a/engine/schema/src/main/java/com/cloud/hypervisor/HypervisorCapabilitiesVO.java +++ b/engine/schema/src/main/java/com/cloud/hypervisor/HypervisorCapabilitiesVO.java @@ -80,6 +80,18 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities { this.uuid = UUID.randomUUID().toString(); } + public HypervisorCapabilitiesVO(HypervisorCapabilitiesVO source) { + this.hypervisorType = source.getHypervisorType(); + this.hypervisorVersion = source.getHypervisorVersion(); + this.maxGuestsLimit = source.getMaxGuestsLimit(); + 
this.maxDataVolumesLimit = source.getMaxDataVolumesLimit(); + this.maxHostsPerCluster = source.getMaxHostsPerCluster(); + this.securityGroupEnabled = source.isSecurityGroupEnabled(); + this.storageMotionSupported = source.isStorageMotionSupported(); + this.vmSnapshotEnabled = source.isVmSnapshotEnabled(); + this.uuid = UUID.randomUUID().toString(); + } + /** * @param hypervisorType the hypervisorType to set */ diff --git a/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDao.java b/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDao.java index 4b25c63403e..718511746c2 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDao.java +++ b/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDao.java @@ -35,4 +35,6 @@ public interface AutoScaleVmGroupVmMapDao extends GenericDao vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImpl.java index 8fca4c26f9a..1ae55d97da2 100644 --- a/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImpl.java @@ -18,7 +18,10 @@ package com.cloud.network.as.dao; import java.util.List; +import javax.annotation.PostConstruct; +import javax.inject.Inject; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.network.as.AutoScaleVmGroupVmMapVO; @@ -31,9 +34,6 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.VMInstanceDao; -import javax.annotation.PostConstruct; -import javax.inject.Inject; - @Component public class AutoScaleVmGroupVmMapDaoImpl extends GenericDaoBase implements AutoScaleVmGroupVmMapDao { @@ -115,4 +115,16 @@ public class 
AutoScaleVmGroupVmMapDaoImpl extends GenericDaoBase= 0; } + + @Override + public int expungeByVmList(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java index 0c59d10c80b..e5c3a661bcd 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDao.java @@ -74,4 +74,6 @@ public interface FirewallRulesDao extends GenericDao { void loadDestinationCidrs(FirewallRuleVO rule); FirewallRuleVO findByNetworkIdAndPorts(long networkId, int startPort, int endPort); + + List listRoutingIngressFirewallRules(long networkId); } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java index 78d86ced32f..5eaa44ca012 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesDaoImpl.java @@ -51,6 +51,7 @@ public class FirewallRulesDaoImpl extends GenericDaoBase i protected SearchBuilder FirewallByPortsAndNetwork; protected final SearchBuilder SystemRuleSearch; protected final GenericSearchBuilder RulesByIpCount; + protected final SearchBuilder RoutingFirewallRulesSearch; @Inject protected FirewallRulesCidrsDao _firewallRulesCidrsDao; @@ -111,6 +112,13 @@ public class FirewallRulesDaoImpl extends GenericDaoBase i FirewallByPortsAndNetwork.and("sourcePortStart", FirewallByPortsAndNetwork.entity().getSourcePortStart(), Op.EQ); 
FirewallByPortsAndNetwork.and("sourcePortEnd", FirewallByPortsAndNetwork.entity().getSourcePortEnd(), Op.EQ); FirewallByPortsAndNetwork.done(); + + RoutingFirewallRulesSearch = createSearchBuilder(); + RoutingFirewallRulesSearch.and("networkId", RoutingFirewallRulesSearch.entity().getNetworkId(), Op.EQ); + RoutingFirewallRulesSearch.and("purpose", RoutingFirewallRulesSearch.entity().getPurpose(), Op.EQ); + RoutingFirewallRulesSearch.and("trafficType", RoutingFirewallRulesSearch.entity().getTrafficType(), Op.EQ); + RoutingFirewallRulesSearch.and("ipId", RoutingFirewallRulesSearch.entity().getSourceIpAddressId(), Op.NULL); + RoutingFirewallRulesSearch.done(); } @Override @@ -403,4 +411,12 @@ public class FirewallRulesDaoImpl extends GenericDaoBase i return findOneBy(sc); } + @Override + public List listRoutingIngressFirewallRules(long networkId) { + SearchCriteria sc = RoutingFirewallRulesSearch.create(); + sc.setParameters("networkId", networkId); + sc.setParameters("purpose", Purpose.Firewall); + sc.setParameters("trafficType", TrafficType.Ingress); + return listBy(sc); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDao.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDao.java index b1b1e1cf757..3f8c36ac94e 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDao.java @@ -105,4 +105,6 @@ public interface IPAddressDao extends GenericDao { void buildQuarantineSearchCriteria(SearchCriteria sc); IPAddressVO findBySourceNetworkIdAndDatacenterIdAndState(long sourceNetworkId, long dataCenterId, State state); + + int expungeByVmList(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java index ca779f7e9ce..aa143838c34 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java 
+++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java @@ -26,6 +26,7 @@ import javax.inject.Inject; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDao; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.dc.Vlan.VlanType; @@ -561,4 +562,16 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen sc.setParameters("state", State.Free); return findOneBy(sc); } + + @Override + public int expungeByVmList(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getAssociatedWithVmId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDao.java b/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDao.java index ac3845beffe..b1831b407a4 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDao.java @@ -16,10 +16,14 @@ // under the License. 
package com.cloud.network.dao; +import java.util.List; + import com.cloud.utils.db.GenericDao; public interface InlineLoadBalancerNicMapDao extends GenericDao { InlineLoadBalancerNicMapVO findByPublicIpAddress(String publicIpAddress); InlineLoadBalancerNicMapVO findByNicId(long nicId); + int expungeByNicList(List nicIds, Long batchSize); + } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImpl.java index 1c3f231f9c1..d64ba8b4155 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImpl.java @@ -17,9 +17,13 @@ package com.cloud.network.dao; +import java.util.List; + +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @Component @@ -41,4 +45,15 @@ public class InlineLoadBalancerNicMapDaoImpl extends GenericDaoBase nicIds, Long batchSize) { + if (CollectionUtils.isEmpty(nicIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("nicIds", sb.entity().getNicId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("nicIds", nicIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDao.java b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDao.java index a25534b7010..be2941d5cb2 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDao.java @@ -42,4 +42,5 @@ public interface LoadBalancerVMMapDao extends GenericDao vmIds, Long batchSize); } diff --git 
a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDaoImpl.java index b32320a84cb..dc37cdeefe3 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/LoadBalancerVMMapDaoImpl.java @@ -18,11 +18,12 @@ package com.cloud.network.dao; import java.util.List; - +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; @@ -135,4 +136,16 @@ public class LoadBalancerVMMapDaoImpl extends GenericDaoBase vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkServiceMapDao.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkServiceMapDao.java index e0509f80c2a..1675c89811a 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkServiceMapDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkServiceMapDao.java @@ -30,6 +30,8 @@ import com.cloud.utils.db.GenericDao; public interface NetworkServiceMapDao extends GenericDao { boolean areServicesSupportedInNetwork(long networkId, Service... services); + boolean isAnyServiceSupportedInNetwork(long networkId, Provider provider, Service... 
services); + boolean canProviderSupportServiceInNetwork(long networkId, Service service, Provider provider); List getServicesInNetwork(long networkId); diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkServiceMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkServiceMapDaoImpl.java index 31e083075fa..f25bee5da47 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkServiceMapDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkServiceMapDaoImpl.java @@ -90,6 +90,28 @@ public class NetworkServiceMapDaoImpl extends GenericDaoBase sc = MultipleServicesSearch.create(); + sc.setParameters("networkId", networkId); + sc.setParameters("provider", provider.getName()); + + if (services != null) { + String[] servicesStr = new String[services.length]; + + int i = 0; + for (Service service : services) { + servicesStr[i] = service.getName(); + i++; + } + + sc.setParameters("service", (Object[])servicesStr); + } + + List networkServices = listBy(sc); + return !networkServices.isEmpty(); + } + @Override public boolean canProviderSupportServiceInNetwork(long networkId, Service service, Provider provider) { SearchCriteria sc = AllFieldsSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkVO.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkVO.java index 581f7899069..02abaacd854 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkVO.java @@ -203,6 +203,9 @@ public class NetworkVO implements Network { @Column(name = "private_mtu") Integer privateMtu; + @Transient + Integer networkCidrSize; + public NetworkVO() { uuid = UUID.randomUUID().toString(); } @@ -444,6 +447,7 @@ public class NetworkVO implements Network { return gateway; } + @Override public void setGateway(String gateway) { this.gateway = gateway; } @@ -457,6 +461,7 @@ public class NetworkVO implements Network 
{ return cidr; } + @Override public void setCidr(String cidr) { this.cidr = cidr; } @@ -759,4 +764,13 @@ public class NetworkVO implements Network { public void setPrivateMtu(Integer privateMtu) { this.privateMtu = privateMtu; } + + @Override + public Integer getNetworkCidrSize() { + return networkCidrSize; + } + + public void setNetworkCidrSize(Integer networkCidrSize) { + this.networkCidrSize = networkCidrSize; + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDao.java b/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDao.java index ebc0f1af227..0516e26e13a 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDao.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDao.java @@ -18,8 +18,12 @@ package com.cloud.network.dao; +import java.util.List; + import com.cloud.utils.db.GenericDao; public interface OpRouterMonitorServiceDao extends GenericDao { + int expungeByVmList(List vmIds, Long batchSize); + } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImpl.java index 451320ac9b6..a8e818cfb18 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImpl.java @@ -17,10 +17,27 @@ package com.cloud.network.dao; -import com.cloud.utils.db.GenericDaoBase; +import java.util.List; + +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + @Component public class OpRouterMonitorServiceDaoImpl extends GenericDaoBase implements OpRouterMonitorServiceDao { + @Override + public int expungeByVmList(List vmIds, Long batchSize) { + if 
(CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java index 622665fc003..a737f1b9a20 100644 --- a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java +++ b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDao.java @@ -49,4 +49,5 @@ public interface PortForwardingRulesDao extends GenericDao listByNetworkAndDestIpAddr(String ip4Address, long networkId); PortForwardingRuleVO findByNetworkAndPorts(long networkId, int startPort, int endPort); + int expungeByVmList(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java index 2653515596b..aa46015acff 100644 --- a/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java @@ -20,6 +20,7 @@ import java.util.List; import javax.inject.Inject; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.network.dao.FirewallRulesCidrsDao; @@ -181,4 +182,16 @@ public class PortForwardingRulesDaoImpl extends GenericDaoBase vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getVirtualMachineId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, 
batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java index 350dda3f3b8..41254ba4a8b 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcOfferingVO.java @@ -28,6 +28,7 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; @Entity @@ -61,8 +62,8 @@ public class VpcOfferingVO implements VpcOffering { @Column(name = "for_nsx") boolean forNsx = false; - @Column(name = "nsx_mode") - String nsxMode; + @Column(name = "network_mode") + NetworkOffering.NetworkMode networkMode; @Column(name = GenericDao.REMOVED_COLUMN) Date removed; @@ -85,6 +86,13 @@ public class VpcOfferingVO implements VpcOffering { @Column(name = "sort_key") int sortKey; + @Column(name="routing_mode") + @Enumerated(value = EnumType.STRING) + private NetworkOffering.RoutingMode routingMode; + + @Column(name = "specify_as_number") + private Boolean specifyAsNumber = false; + public VpcOfferingVO() { this.uuid = UUID.randomUUID().toString(); } @@ -158,12 +166,12 @@ public class VpcOfferingVO implements VpcOffering { this.forNsx = forNsx; } - public String getNsxMode() { - return nsxMode; + public NetworkOffering.NetworkMode getNetworkMode() { + return networkMode; } - public void setNsxMode(String nsxMode) { - this.nsxMode = nsxMode; + public void setNetworkMode(NetworkOffering.NetworkMode networkMode) { + this.networkMode = networkMode; } public void setUniqueName(String uniqueName) { @@ -226,4 +234,21 @@ public class VpcOfferingVO implements VpcOffering { return sortKey; } + @Override + public NetworkOffering.RoutingMode getRoutingMode() { + return routingMode; + } + + public void setRoutingMode(NetworkOffering.RoutingMode routingMode) { + this.routingMode = routingMode; + 
} + + @Override + public Boolean isSpecifyAsNumber() { + return specifyAsNumber; + } + + public void setSpecifyAsNumber(Boolean specifyAsNumber) { + this.specifyAsNumber = specifyAsNumber; + } } diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java index c2024e06c51..27d8227284b 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/VpcVO.java @@ -159,6 +159,10 @@ public class VpcVO implements Vpc { return cidr; } + public void setCidr(String cidr) { + this.cidr = cidr; + } + @Override public long getDomainId() { return domainId; diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/dao/VpcOfferingDao.java b/engine/schema/src/main/java/com/cloud/network/vpc/dao/VpcOfferingDao.java index 264a1ebc75e..aa17723f0b1 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/dao/VpcOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/dao/VpcOfferingDao.java @@ -33,4 +33,6 @@ public interface VpcOfferingDao extends GenericDao { NetUtils.InternetProtocol getVpcOfferingInternetProtocol(long offeringId); boolean isIpv6Supported(long offeringId); + + boolean isRoutedVpc(long offeringId); } diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java index 1cc6a21da76..b83fd891305 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java @@ -19,6 +19,7 @@ package com.cloud.network.vpc.dao; import javax.inject.Inject; +import com.cloud.offering.NetworkOffering; import org.apache.cloudstack.api.ApiConstants; import org.springframework.stereotype.Component; @@ -84,4 +85,9 @@ public class VpcOfferingDaoImpl extends GenericDaoBase impl NetUtils.InternetProtocol 
internetProtocol = getVpcOfferingInternetProtocol(offeringId); return NetUtils.InternetProtocol.isIpv6EnabledProtocol(internetProtocol); } + + @Override + public boolean isRoutedVpc(long offeringId) { + return NetworkOffering.NetworkMode.ROUTED.equals(findById(offeringId).getNetworkMode()); + } } diff --git a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java index b2fabf2e3cd..0bf110757d7 100644 --- a/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java +++ b/engine/schema/src/main/java/com/cloud/offerings/NetworkOfferingVO.java @@ -139,8 +139,8 @@ public class NetworkOfferingVO implements NetworkOffering { @Column(name = "for_nsx") boolean forNsx = false; - @Column(name = "nsx_mode") - String nsxMode; + @Column(name = "network_mode") + NetworkMode networkMode; @Column(name = "egress_default_policy") boolean egressdefaultpolicy; @@ -174,6 +174,13 @@ public class NetworkOfferingVO implements NetworkOffering { @Column(name="service_package_id") String servicePackageUuid = null; + @Column(name="routing_mode") + @Enumerated(value = EnumType.STRING) + private RoutingMode routingMode; + + @Column(name = "specify_as_number") + private Boolean specifyAsNumber = false; + @Override public boolean isKeepAliveEnabled() { return keepAliveEnabled; @@ -211,12 +218,12 @@ public class NetworkOfferingVO implements NetworkOffering { } @Override - public String getNsxMode() { - return nsxMode; + public NetworkMode getNetworkMode() { + return networkMode; } - public void setNsxMode(String nsxMode) { - this.nsxMode = nsxMode; + public void setNetworkMode(NetworkMode networkMode) { + this.networkMode = networkMode; } @Override @@ -582,4 +589,21 @@ public class NetworkOfferingVO implements NetworkOffering { public boolean isSupportsVmAutoScaling() { return supportsVmAutoScaling; } + + @Override + public RoutingMode getRoutingMode() { + return routingMode; + } + + public void 
setRoutingMode(RoutingMode routingMode) { + this.routingMode = routingMode; + } + + public Boolean isSpecifyAsNumber() { + return specifyAsNumber; + } + + public void setSpecifyAsNumber(Boolean specifyAsNumber) { + this.specifyAsNumber = specifyAsNumber; + } } diff --git a/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDao.java b/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDao.java index 381d2144df1..abb63a10d06 100644 --- a/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDao.java @@ -76,4 +76,6 @@ public interface NetworkOfferingDao extends GenericDao NetUtils.InternetProtocol getNetworkOfferingInternetProtocol(long offeringId, NetUtils.InternetProtocol defaultProtocol); boolean isIpv6Supported(long offeringId); + + boolean isRoutedNetwork(long offeringId); } diff --git a/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java index 823ea36b97f..9bc74b13932 100644 --- a/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java @@ -292,4 +292,9 @@ public class NetworkOfferingDaoImpl extends GenericDaoBase { public void expungeExpiredRecords(Date cutTime); public Integer getCopyCmdCountForSSVM(Long id); + int expungeByVmList(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java index f89a1bbf4cc..a37acdf6029 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java @@ -19,6 +19,7 @@ package com.cloud.secstorage; import java.util.Date; import java.util.List; 
+import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -57,4 +58,16 @@ public class CommandExecLogDaoImpl extends GenericDaoBase copyCmds = customSearch(sc, null); return copyCmds.size(); } + + @Override + public int expungeByVmList(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java index d086ad1dac1..48e63d8e2b5 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java @@ -54,7 +54,7 @@ public interface ServiceOfferingDao extends GenericDao List listPublicByCpuAndMemory(Integer cpus, Integer memory); - ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId); - List listByHostTag(String tag); + + ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved); } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 34ac7c47521..706dcdc1b7b 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -282,10 +282,10 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase sc = SearchComputeOfferingByComputeOnlyDiskOffering.create(); sc.setParameters("disk_offering_id", diskOfferingId); - List vos = listBy(sc); + List vos = 
includingRemoved ? listIncludingRemovedBy(sc) : listBy(sc); if (vos.size() == 0) { return null; } diff --git a/engine/schema/src/main/java/com/cloud/storage/BucketVO.java b/engine/schema/src/main/java/com/cloud/storage/BucketVO.java index 181b02e5a1b..53017447c07 100644 --- a/engine/schema/src/main/java/com/cloud/storage/BucketVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/BucketVO.java @@ -97,17 +97,23 @@ public class BucketVO implements Bucket { String uuid; public BucketVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public BucketVO(String name) { + this.uuid = UUID.randomUUID().toString(); + this.name = name; + this.state = State.Allocated; } public BucketVO(long accountId, long domainId, long objectStoreId, String name, Integer quota, boolean versioning, - boolean encryption, boolean objectLock, String policy) - { + boolean encryption, boolean objectLock, String policy) { this.accountId = accountId; this.domainId = domainId; this.objectStoreId = objectStoreId; this.name = name; - state = State.Allocated; - uuid = UUID.randomUUID().toString(); + this.state = State.Allocated; + this.uuid = UUID.randomUUID().toString(); this.quota = quota; this.versioning = versioning; this.encryption = encryption; diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java index 80a890aacad..86e0da53666 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotScheduleVO.java @@ -29,6 +29,8 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import com.cloud.storage.snapshot.SnapshotSchedule; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; @Entity @Table(name = "snapshot_schedule") @@ -132,4 +134,11 @@ public class SnapshotScheduleVO implements SnapshotSchedule { public void 
setUuid(String uuid) { this.uuid = uuid; } + + @Override + public String toString() { + ReflectionToStringBuilder reflectionToStringBuilder = new ReflectionToStringBuilder(this, ToStringStyle.JSON_STYLE); + reflectionToStringBuilder.setExcludeFieldNames("id"); + return reflectionToStringBuilder.toString(); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java index 87bbe233e3b..6a15e5e584f 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VMTemplateVO.java @@ -32,7 +32,9 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import javax.persistence.Transient; +import com.cloud.cpu.CPU; import com.cloud.user.UserData; +import org.apache.cloudstack.util.CPUArchConverter; import org.apache.cloudstack.util.HypervisorTypeConverter; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @@ -170,6 +172,10 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Enumerated(value = EnumType.STRING) UserData.UserDataOverridePolicy userDataLinkPolicy; + @Column(name = "arch") + @Convert(converter = CPUArchConverter.class) + private CPU.CPUArch arch; + @Override public String getUniqueName() { return uniqueName; @@ -212,7 +218,7 @@ public class VMTemplateVO implements VirtualMachineTemplate { public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured, boolean isExtractable, TemplateType type, String url, boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, HypervisorType hyperType, String templateTag, Map details, boolean sshKeyEnabled, boolean isDynamicallyScalable, boolean directDownload, - boolean deployAsIs) { + boolean deployAsIs, CPU.CPUArch arch) { this(id, name, format, @@ -238,6 +244,7 @@ public class 
VMTemplateVO implements VirtualMachineTemplate { state = State.Active; this.directDownload = directDownload; this.deployAsIs = deployAsIs; + this.arch = arch; } public static VMTemplateVO createPreHostIso(Long id, String uniqueName, String name, ImageFormat format, boolean isPublic, boolean featured, TemplateType type, @@ -684,4 +691,13 @@ public class VMTemplateVO implements VirtualMachineTemplate { this.userDataLinkPolicy = userDataLinkPolicy; } + @Override + public CPU.CPUArch getArch() { + return arch; + } + + public void setArch(CPU.CPUArch arch) { + this.arch = arch; + } + } diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java index e12859ea8d6..c105acf40b8 100644 --- a/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/VolumeVO.java @@ -182,6 +182,9 @@ public class VolumeVO implements Volume { @Column(name = "encrypt_format") private String encryptFormat; + @Column(name = "delete_protection") + private boolean deleteProtection; + // Real Constructor public VolumeVO(Type type, String name, long dcId, long domainId, @@ -678,4 +681,13 @@ public class VolumeVO implements Volume { public String getEncryptFormat() { return encryptFormat; } public void setEncryptFormat(String encryptFormat) { this.encryptFormat = encryptFormat; } + + @Override + public boolean isDeleteProtection() { + return deleteProtection; + } + + public void setDeleteProtection(boolean deleteProtection) { + this.deleteProtection = deleteProtection; + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/DiskOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/DiskOfferingDaoImpl.java index 853a9998226..93e74766277 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/DiskOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/DiskOfferingDaoImpl.java @@ -32,7 +32,6 @@ import 
org.springframework.stereotype.Component; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Storage; import com.cloud.utils.db.Attribute; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -60,16 +59,6 @@ public class DiskOfferingDaoImpl extends GenericDaoBase im _computeOnlyAttr = _allAttributes.get("computeOnly"); } - @Override - public List searchIncludingRemoved(SearchCriteria sc, final Filter filter, final Boolean lock, final boolean cache) { - return super.searchIncludingRemoved(sc, filter, lock, cache); - } - - @Override - public List customSearchIncludingRemoved(SearchCriteria sc, final Filter filter) { - return super.customSearchIncludingRemoved(sc, filter); - } - @Override protected List executeList(final String sql, final Object... params) { StringBuilder builder = new StringBuilder(sql); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDao.java index 998d0bbd724..171634fb104 100755 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDao.java @@ -57,4 +57,5 @@ public interface SnapshotDao extends GenericDao, StateDao listByIds(Object... 
ids); + List searchByVolumes(List volumeIds); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java index 030d10d6682..f5fc9c47d03 100755 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java @@ -18,11 +18,13 @@ package com.cloud.storage.dao; import java.sql.PreparedStatement; import java.sql.ResultSet; +import java.util.ArrayList; import java.util.List; import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.server.ResourceTag.ResourceObjectType; @@ -285,4 +287,16 @@ public class SnapshotDaoImpl extends GenericDaoBase implements sc.setParameters("status", (Object[]) status); return listBy(sc, null); } + + @Override + public List searchByVolumes(List volumeIds) { + if (CollectionUtils.isEmpty(volumeIds)) { + return new ArrayList<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("volumeIds", sb.entity().getVolumeId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("volumeIds", volumeIds.toArray()); + return search(sc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDao.java index 43bb5b3d4d5..02a0355d92d 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDao.java @@ -18,9 +18,12 @@ */ package com.cloud.storage.dao; +import java.util.List; + import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; import com.cloud.utils.db.GenericDao; public interface SnapshotDetailsDao extends GenericDao, ResourceDetailsDao { + public List findDetailsByZoneAndKey(long dcId, String 
key); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDaoImpl.java index e4ae22cd021..584a2481726 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDetailsDaoImpl.java @@ -18,11 +18,44 @@ */ package com.cloud.storage.dao; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.exception.CloudRuntimeException; + public class SnapshotDetailsDaoImpl extends ResourceDetailsDaoBase implements SnapshotDetailsDao { + private static final String GET_SNAPSHOT_DETAILS_ON_ZONE = "SELECT s.* FROM snapshot_details s LEFT JOIN snapshots ss ON ss.id=s.snapshot_id WHERE ss.data_center_id = ? 
AND s.name = ?"; + @Override public void addDetail(long resourceId, String key, String value, boolean display) { super.addDetail(new SnapshotDetailsVO(resourceId, key, value, display)); } + + public List findDetailsByZoneAndKey(long dcId, String key) { + StringBuilder sql = new StringBuilder(GET_SNAPSHOT_DETAILS_ON_ZONE); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + List snapshotDetailsOnZone = new ArrayList(); + try (PreparedStatement pstmt = txn.prepareStatement(sql.toString());) { + if (pstmt != null) { + pstmt.setLong(1, dcId); + pstmt.setString(2, key); + try (ResultSet rs = pstmt.executeQuery();) { + while (rs.next()) { + snapshotDetailsOnZone.add(toEntityBean(rs, false)); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Could not find details by given zone and key due to:" + e.getMessage(), e); + } + } + return snapshotDetailsOnZone; + } catch (SQLException e) { + throw new CloudRuntimeException("Could not find details by given zone and key due to:" + e.getMessage(), e); + } + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotScheduleDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotScheduleDao.java index 7ca0a3915f5..284a42cf9e1 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotScheduleDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotScheduleDao.java @@ -27,13 +27,11 @@ import com.cloud.utils.db.GenericDao; */ public interface SnapshotScheduleDao extends GenericDao { - List getCoincidingSnapshotSchedules(long volumeId, Date date); - List getSchedulesToExecute(Date currentTimestamp); - SnapshotScheduleVO getCurrentSchedule(Long volumeId, Long policyId, boolean executing); + List getSchedulesAssignedWithAsyncJob(); - SnapshotScheduleVO findOneByVolume(long volumeId); + SnapshotScheduleVO getCurrentSchedule(Long volumeId, Long policyId, boolean executing); SnapshotScheduleVO findOneByVolumePolicy(long volumeId, long policyId); diff --git 
a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotScheduleDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotScheduleDaoImpl.java index 925d02dd90b..14669ce1d43 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotScheduleDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotScheduleDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.SearchCriteria; public class SnapshotScheduleDaoImpl extends GenericDaoBase implements SnapshotScheduleDao { protected final SearchBuilder executableSchedulesSearch; protected final SearchBuilder coincidingSchedulesSearch; - private final SearchBuilder VolumeIdSearch; + protected final SearchBuilder schedulesAssignedWithAsyncJob; private final SearchBuilder VolumeIdPolicyIdSearch; protected SnapshotScheduleDaoImpl() { @@ -48,36 +48,14 @@ public class SnapshotScheduleDaoImpl extends GenericDaoBase getCoincidingSnapshotSchedules(long volumeId, Date date) { - SearchCriteria sc = coincidingSchedulesSearch.create(); - sc.setParameters("volumeId", volumeId); - sc.setParameters("scheduledTimestamp", date); - // Don't return manual snapshots. They will be executed through another - // code path. 
- sc.addAnd("policyId", SearchCriteria.Op.NEQ, 1L); - return listBy(sc); - } - - @Override - public SnapshotScheduleVO findOneByVolume(long volumeId) { - SearchCriteria sc = VolumeIdSearch.create(); - sc.setParameters("volumeId", volumeId); - return findOneBy(sc); + schedulesAssignedWithAsyncJob = createSearchBuilder(); + schedulesAssignedWithAsyncJob.and("asyncJobId", schedulesAssignedWithAsyncJob.entity().getAsyncJobId(), SearchCriteria.Op.NNULL); + schedulesAssignedWithAsyncJob.done(); } @Override @@ -98,6 +76,11 @@ public class SnapshotScheduleDaoImpl extends GenericDaoBase getSchedulesAssignedWithAsyncJob() { + return listBy(schedulesAssignedWithAsyncJob.create()); + } + /** * {@inheritDoc} */ diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java index 0c39a8c581a..376933f92e7 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java @@ -17,6 +17,10 @@ package com.cloud.storage.dao; +import java.util.List; + +import javax.inject.Inject; + import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; @@ -26,9 +30,6 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import javax.inject.Inject; -import java.util.List; - public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase implements StoragePoolDetailsDao, ScopedConfigStorage { @Inject @@ -43,8 +44,8 @@ public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase key) { - StoragePoolDetailVO vo = findDetail(id, key.key()); + public String getConfigValue(long id, 
String key) { + StoragePoolDetailVO vo = findDetail(id, key); return vo == null ? null : vo.getValue(); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java index b099a6d6bdb..62ef5b7570d 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java @@ -41,4 +41,6 @@ public interface StoragePoolHostDao extends GenericDao public void deleteStoragePoolHostDetails(long hostId, long poolId); List listByHostId(long hostId); + + Pair, Integer> listByPoolIdNotInCluster(long clusterId, long poolId); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java index 9e7bdca1181..987a42f410e 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java @@ -23,12 +23,18 @@ import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; +import javax.annotation.PostConstruct; +import javax.inject.Inject; + import org.springframework.stereotype.Component; +import com.cloud.host.HostVO; import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; import com.cloud.storage.StoragePoolHostVO; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; @@ -40,6 +46,11 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase HostSearch; protected final SearchBuilder PoolHostSearch; + protected SearchBuilder poolNotInClusterSearch; + + @Inject + HostDao hostDao; + protected static final String HOST_FOR_POOL_SEARCH = "SELECT * FROM 
storage_pool_host_ref ph, host h where ph.host_id = h.id and ph.pool_id=? and h.status=? "; protected static final String HOSTS_FOR_POOLS_SEARCH = "SELECT DISTINCT(ph.host_id) FROM storage_pool_host_ref ph, host h WHERE ph.host_id = h.id AND h.status = 'Up' AND resource_state = 'Enabled' AND ph.pool_id IN (?)"; @@ -68,6 +79,15 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase hostSearch = hostDao.createSearchBuilder(); + poolNotInClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), poolNotInClusterSearch.entity().getHostId(), JoinBuilder.JoinType.INNER); + hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.NEQ); + } + @Override public List listByPoolId(long id) { SearchCriteria sc = PoolSearch.create(); @@ -194,4 +214,12 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase, Integer> listByPoolIdNotInCluster(long clusterId, long poolId) { + SearchCriteria sc = poolNotInClusterSearch.create(); + sc.setParameters("poolId", poolId); + sc.setJoinParameters("hostSearch", "clusterId", clusterId); + return searchAndCount(sc, null); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java index 4e9c63699ca..e6ffca06f9e 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java @@ -112,7 +112,8 @@ public interface VolumeDao extends GenericDao, StateDao virtualRouters); @@ -158,4 +159,7 @@ public interface VolumeDao extends GenericDao, StateDao listAllocatedVolumesForAccountDiskOfferingIdsAndNotForVms(long accountId, List diskOfferingIds, List vmIds); + List searchRemovedByVms(List vmIds, Long batchSize); + + VolumeVO findOneByIScsiName(String iScsiName); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index 
31d64daf147..0c4d707635a 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -27,14 +27,12 @@ import java.util.stream.Collectors; import javax.inject.Inject; -import com.cloud.configuration.Resource; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.TransactionCallback; import org.apache.cloudstack.reservation.ReservationVO; import org.apache.cloudstack.reservation.dao.ReservationDao; import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; +import com.cloud.configuration.Resource; import com.cloud.exception.InvalidParameterValueException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.server.ResourceTag.ResourceObjectType; @@ -48,12 +46,15 @@ import com.cloud.storage.VolumeVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.utils.Pair; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; @@ -76,12 +77,12 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol protected GenericSearchBuilder primaryStorageSearch2; protected GenericSearchBuilder secondaryStorageSearch; private final SearchBuilder poolAndPathSearch; + @Inject ReservationDao reservationDao; @Inject - ResourceTagDao _tagsDao; + ResourceTagDao tagsDao; - protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? 
and v.mirror_state = ?"; // need to account for zone-wide primary storage where storage_pool has // null-value pod and cluster, where hypervisor information is stored in // storage_pool @@ -395,6 +396,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol AllFieldsSearch.and("updatedCount", AllFieldsSearch.entity().getUpdatedCount(), Op.EQ); AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), Op.EQ); AllFieldsSearch.and("passphraseId", AllFieldsSearch.entity().getPassphraseId(), Op.EQ); + AllFieldsSearch.and("iScsiName", AllFieldsSearch.entity().get_iScsiName(), Op.EQ); AllFieldsSearch.done(); RootDiskStateSearch = createSearchBuilder(); @@ -502,7 +504,6 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol poolAndPathSearch.and("poolId", poolAndPathSearch.entity().getPoolId(), Op.EQ); poolAndPathSearch.and("path", poolAndPathSearch.entity().getPath(), Op.EQ); poolAndPathSearch.done(); - } @Override @@ -740,7 +741,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol logger.debug(String.format("Removing volume %s from DB", id)); VolumeVO entry = findById(id); if (entry != null) { - _tagsDao.removeByIdAndType(id, ResourceObjectType.Volume); + tagsDao.removeByIdAndType(id, ResourceObjectType.Volume); } boolean result = super.remove(id); @@ -763,7 +764,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol destVol.setInstanceId(instanceId); update(srcVolId, srcVol); update(destVolId, destVol); - _tagsDao.updateResourceId(srcVolId, destVolId, ResourceObjectType.Volume); + tagsDao.updateResourceId(srcVolId, destVolId, ResourceObjectType.Volume); } catch (Exception e) { throw new CloudRuntimeException("Unable to persist the sequence number for this host"); } @@ -896,4 +897,24 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol return volume; }); } + + @Override + public List searchRemovedByVms(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return new 
ArrayList<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.NNULL); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + Filter filter = new Filter(VolumeVO.class, "id", true, 0L, batchSize); + return searchIncludingRemoved(sc, filter, null, false); + } + + public VolumeVO findOneByIScsiName(String iScsiName) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("iScsiName", iScsiName); + return findOneIncludingRemovedBy(sc); + } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeStatsDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeStatsDao.java index ff6af56c9c3..b4a596dfc8d 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeStatsDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeStatsDao.java @@ -75,8 +75,10 @@ public interface VolumeStatsDao extends GenericDao { /** * Removes (expunges) all Volume stats with {@code timestamp} less than * a given Date. - * @param limit the maximum date to keep stored. Records that exceed this limit will be removed. + * @param limitDate the maximum date to keep stored. Records that exceed this limit will be removed. + * @param limitPerQuery the maximum amount of rows to be removed in a single query. We loop if there are still rows to be removed after a given query. + * If 0 or negative, no limit is used. 
*/ - void removeAllByTimestampLessThan(Date limit); + void removeAllByTimestampLessThan(Date limitDate, long limitPerQuery); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeStatsDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeStatsDaoImpl.java index 5d0d3c8921c..d1149e47408 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeStatsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeStatsDaoImpl.java @@ -21,6 +21,8 @@ import java.util.List; import javax.annotation.PostConstruct; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.Filter; @@ -33,6 +35,8 @@ import com.cloud.storage.VolumeStatsVO; @Component public class VolumeStatsDaoImpl extends GenericDaoBase implements VolumeStatsDao { + protected Logger logger = LogManager.getLogger(getClass()); + protected SearchBuilder volumeIdSearch; protected SearchBuilder volumeIdTimestampGreaterThanEqualSearch; protected SearchBuilder volumeIdTimestampLessThanEqualSearch; @@ -116,9 +120,21 @@ public class VolumeStatsDaoImpl extends GenericDaoBase impl } @Override - public void removeAllByTimestampLessThan(Date limit) { + public void removeAllByTimestampLessThan(Date limitDate, long limitPerQuery) { SearchCriteria sc = timestampSearch.create(); - sc.setParameters(TIMESTAMP, limit); - expunge(sc); + sc.setParameters(TIMESTAMP, limitDate); + + logger.debug(String.format("Starting to remove all volume_stats rows older than [%s].", limitDate)); + + long totalRemoved = 0; + long removed; + + do { + removed = expunge(sc, limitPerQuery); + totalRemoved += removed; + logger.trace(String.format("Removed [%s] volume_stats rows on the last update and a sum of [%s] volume_stats rows older than [%s] until now.", removed, totalRemoved, limitDate)); + } while (limitPerQuery > 0 && removed >= limitPerQuery); + + logger.info(String.format("Removed a 
total of [%s] volume_stats rows older than [%s].", totalRemoved, limitDate)); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseCreator.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseCreator.java index 154a8d11887..384826227af 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseCreator.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseCreator.java @@ -116,10 +116,6 @@ public class DatabaseCreator { } public static void main(String[] args) { - - ClassPathXmlApplicationContext appContext = new ClassPathXmlApplicationContext(new String[] {"/com/cloud/upgrade/databaseCreatorContext.xml"}); - appContext.getBean(ComponentContext.class); - String dbPropsFile = ""; List sqlFiles = new ArrayList(); List upgradeClasses = new ArrayList(); @@ -166,13 +162,17 @@ public class DatabaseCreator { System.exit(1); } + initDB(dbPropsFile, rootPassword, databases, dryRun); + + ClassPathXmlApplicationContext appContext = new ClassPathXmlApplicationContext(new String[] {"/com/cloud/upgrade/databaseCreatorContext.xml"}); + appContext.getBean(ComponentContext.class); + try { TransactionLegacy.initDataSource(dbPropsFile); } catch (IOException e) { e.printStackTrace(); System.exit(1); } - initDB(dbPropsFile, rootPassword, databases, dryRun); // Process sql files for (String sqlFile : sqlFiles) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index d390a480e41..cb219007325 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -130,6 +130,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { protected static Logger LOGGER = LogManager.getLogger(DatabaseUpgradeChecker.class); private final DatabaseVersionHierarchy hierarchy; private static final String VIEWS_DIRECTORY = 
Paths.get("META-INF", "db", "views").toString(); + private static final String PROCEDURES_DIRECTORY = Paths.get("META-INF", "db", "procedures").toString(); @Inject VersionDao _dao; @@ -298,83 +299,120 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { } protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVersion) { + executeProcedureScripts(); + final DbUpgrade[] upgrades = executeUpgrades(dbVersion, currentVersion); + + executeViewScripts(); + updateSystemVmTemplates(upgrades); + } + + protected void executeProcedureScripts() { + LOGGER.info(String.format("Executing Stored Procedure scripts that are under resource directory [%s].", PROCEDURES_DIRECTORY)); + List filesPathUnderViewsDirectory = FileUtil.getFilesPathsUnderResourceDirectory(PROCEDURES_DIRECTORY); + + try (TransactionLegacy txn = TransactionLegacy.open("execute-procedure-scripts")) { + Connection conn = txn.getConnection(); + + for (String filePath : filesPathUnderViewsDirectory) { + LOGGER.debug(String.format("Executing PROCEDURE script [%s].", filePath)); + + InputStream viewScript = Thread.currentThread().getContextClassLoader().getResourceAsStream(filePath); + runScript(conn, viewScript); + } + + LOGGER.info(String.format("Finished execution of PROCEDURE scripts that are under resource directory [%s].", PROCEDURES_DIRECTORY)); + } catch (SQLException e) { + String message = String.format("Unable to execute PROCEDURE scripts due to [%s].", e.getMessage()); + LOGGER.error(message, e); + throw new CloudRuntimeException(message, e); + } + } + + private DbUpgrade[] executeUpgrades(CloudStackVersion dbVersion, CloudStackVersion currentVersion) { LOGGER.info("Database upgrade must be performed from " + dbVersion + " to " + currentVersion); final DbUpgrade[] upgrades = calculateUpgradePath(dbVersion, currentVersion); for (DbUpgrade upgrade : upgrades) { - VersionVO version; - LOGGER.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade 
from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade - .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); - TransactionLegacy txn = TransactionLegacy.open("Upgrade"); - txn.start(); + VersionVO version = executeUpgrade(upgrade); + executeUpgradeCleanup(upgrade, version); + } + return upgrades; + } + + private VersionVO executeUpgrade(DbUpgrade upgrade) { + VersionVO version; + LOGGER.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade + .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); + TransactionLegacy txn = TransactionLegacy.open("Upgrade"); + txn.start(); + try { + Connection conn; try { - Connection conn; - try { - conn = txn.getConnection(); - } catch (SQLException e) { - String errorMessage = "Unable to upgrade the database"; - LOGGER.error(errorMessage, e); - throw new CloudRuntimeException(errorMessage, e); - } - InputStream[] scripts = upgrade.getPrepareScripts(); - if (scripts != null) { - for (InputStream script : scripts) { - runScript(conn, script); - } - } - - upgrade.performDataMigration(conn); - - version = new VersionVO(upgrade.getUpgradedVersion()); - version = _dao.persist(version); - - txn.commit(); - } catch (CloudRuntimeException e) { + conn = txn.getConnection(); + } catch (SQLException e) { String errorMessage = "Unable to upgrade the database"; LOGGER.error(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); - } finally { - txn.close(); + } + InputStream[] scripts = upgrade.getPrepareScripts(); + if (scripts != null) { + for (InputStream script : scripts) { + runScript(conn, script); + } } - // Run the corresponding '-cleanup.sql' script - txn = TransactionLegacy.open("Cleanup"); - try { - LOGGER.info("Cleanup upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade - .getUpgradableVersionRange()[1] + " to " + 
upgrade.getUpgradedVersion()); + upgrade.performDataMigration(conn); - txn.start(); - Connection conn; - try { - conn = txn.getConnection(); - } catch (SQLException e) { - LOGGER.error("Unable to cleanup the database", e); - throw new CloudRuntimeException("Unable to cleanup the database", e); - } + version = new VersionVO(upgrade.getUpgradedVersion()); + version = _dao.persist(version); - InputStream[] scripts = upgrade.getCleanupScripts(); - if (scripts != null) { - for (InputStream script : scripts) { - runScript(conn, script); - LOGGER.debug("Cleanup script " + upgrade.getClass().getSimpleName() + " is executed successfully"); - } - } - txn.commit(); - - txn.start(); - version.setStep(Step.Complete); - version.setUpdated(new Date()); - _dao.update(version.getId(), version); - txn.commit(); - LOGGER.debug("Upgrade completed for version " + version.getVersion()); - } finally { - txn.close(); - } + txn.commit(); + } catch (CloudRuntimeException e) { + String errorMessage = "Unable to upgrade the database"; + LOGGER.error(errorMessage, e); + throw new CloudRuntimeException(errorMessage, e); + } finally { + txn.close(); } + return version; + } - executeViewScripts(); - updateSystemVmTemplates(upgrades); + private void executeUpgradeCleanup(DbUpgrade upgrade, VersionVO version) { + TransactionLegacy txn; + // Run the corresponding '-cleanup.sql' script + txn = TransactionLegacy.open("Cleanup"); + try { + LOGGER.info("Cleanup upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade + .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); + + txn.start(); + Connection conn; + try { + conn = txn.getConnection(); + } catch (SQLException e) { + LOGGER.error("Unable to cleanup the database", e); + throw new CloudRuntimeException("Unable to cleanup the database", e); + } + + InputStream[] scripts = upgrade.getCleanupScripts(); + if (scripts != null) { + for (InputStream script : scripts) 
{ + runScript(conn, script); + LOGGER.debug("Cleanup script " + upgrade.getClass().getSimpleName() + " is executed successfully"); + } + } + txn.commit(); + + txn.start(); + version.setStep(Step.Complete); + version.setUpdated(new Date()); + _dao.update(version.getId(), version); + txn.commit(); + LOGGER.debug("Upgrade completed for version " + version.getVersion()); + } finally { + txn.close(); + } } protected void executeViewScripts() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 9575cfaf863..7314937ff5b 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -49,8 +49,11 @@ import com.cloud.vm.dao.VMInstanceDaoImpl; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.dao.ConfigurationDaoImpl; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDaoImpl; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDaoImpl; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; @@ -117,6 +120,8 @@ public class SystemVmTemplateRegistration { @Inject ImageStoreDao imageStoreDao; @Inject + ImageStoreDetailsDao imageStoreDetailsDao; + @Inject ClusterDao clusterDao; @Inject ConfigurationDao configurationDao; @@ -130,6 +135,7 @@ public class SystemVmTemplateRegistration { templateDataStoreDao = new 
BasicTemplateDataStoreDaoImpl(); vmInstanceDao = new VMInstanceDaoImpl(); imageStoreDao = new ImageStoreDaoImpl(); + imageStoreDetailsDao = new ImageStoreDetailsDaoImpl(); clusterDao = new ClusterDaoImpl(); configurationDao = new ConfigurationDaoImpl(); } @@ -142,6 +148,14 @@ public class SystemVmTemplateRegistration { this.systemVmTemplateVersion = systemVmTemplateVersion; } + public static String getMountCommand(String nfsVersion, String device, String dir) { + String cmd = "sudo mount -t nfs"; + if (StringUtils.isNotBlank(nfsVersion)) { + cmd = String.format("%s -o vers=%s", cmd, nfsVersion); + } + return String.format("%s %s %s", cmd, device, dir); + } + public String getSystemVmTemplateVersion() { if (StringUtils.isEmpty(systemVmTemplateVersion)) { return String.format("%s.%s", CS_MAJOR_VERSION, CS_TINY_VERSION); @@ -320,14 +334,14 @@ public class SystemVmTemplateRegistration { } }; - public static boolean validateIfSeeded(String url, String path) { + public static boolean validateIfSeeded(String url, String path, String nfsVersion) { String filePath = null; try { filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString(); if (filePath == null) { throw new CloudRuntimeException("Failed to create temporary directory to mount secondary store"); } - mountStore(url, filePath); + mountStore(url, filePath, nfsVersion); int lastIdx = path.lastIndexOf(File.separator); String partialDirPath = path.substring(0, lastIdx); String templatePath = filePath + File.separator + partialDirPath; @@ -427,14 +441,13 @@ public class SystemVmTemplateRegistration { return new Pair<>(url, storeId); } - public static void mountStore(String storeUrl, String path) { + public static void mountStore(String storeUrl, String path, String nfsVersion) { try { if (storeUrl != null) { URI uri = new URI(UriUtils.encodeURIComponent(storeUrl)); String host = uri.getHost(); String mountPath = uri.getPath(); - String mount = String.format(MOUNT_COMMAND, host + ":" + mountPath, path); - 
Script.runSimpleBashScript(mount); + Script.runSimpleBashScript(getMountCommand(nfsVersion, host + ":" + mountPath, path)); } } catch (Exception e) { String msg = "NFS Store URL is not in the correct format"; @@ -773,7 +786,8 @@ public class SystemVmTemplateRegistration { throw new CloudRuntimeException("Failed to create temporary file path to mount the store"); } Pair storeUrlAndId = getNfsStoreInZone(zoneId); - mountStore(storeUrlAndId.first(), filePath); + String nfsVersion = getNfsVersion(storeUrlAndId.second()); + mountStore(storeUrlAndId.first(), filePath, nfsVersion); List hypervisorList = fetchAllHypervisors(zoneId); for (String hypervisor : hypervisorList) { Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor); @@ -784,7 +798,7 @@ public class SystemVmTemplateRegistration { VMTemplateVO templateVO = vmTemplateDao.findById(templateId); TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(templateId, DataStoreRole.Image); String installPath = templateDataStoreVO.getInstallPath(); - if (validateIfSeeded(storeUrlAndId.first(), installPath)) { + if (validateIfSeeded(storeUrlAndId.first(), installPath, nfsVersion)) { continue; } else if (templateVO != null) { registerTemplate(hypervisorAndTemplateName, storeUrlAndId, templateVO, templateDataStoreVO, filePath); @@ -889,4 +903,17 @@ public class SystemVmTemplateRegistration { } }); } + + public String getNfsVersion(long storeId) { + final String configKey = "secstorage.nfs.version"; + final Map storeDetails = imageStoreDetailsDao.getDetails(storeId); + if (storeDetails != null && storeDetails.containsKey(configKey)) { + return storeDetails.get(configKey); + } + ConfigurationVO globalNfsVersion = configurationDao.findByName(configKey); + if (globalNfsVersion != null) { + return globalNfsVersion.getValue(); + } + return null; + } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41910to42000.java 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41910to42000.java index 02266a1331e..8745ae18034 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41910to42000.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41910to42000.java @@ -31,6 +31,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade41910to42000 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate { private SystemVmTemplateRegistration systemVmTemplateRegistration; + private static final int MAX_INDEXED_CHARS_IN_CHAR_SET_UTF8MB4 = 191; @Override public String[] getUpgradableVersionRange() { @@ -61,6 +62,7 @@ public class Upgrade41910to42000 extends DbUpgradeAbstractImpl implements DbUpgr @Override public void performDataMigration(Connection conn) { updateKubernetesClusterNodeVersions(conn); + checkAndUpdateAffinityGroupNameCharSetToUtf8mb4(conn); } @Override @@ -177,4 +179,32 @@ public class Upgrade41910to42000 extends DbUpgradeAbstractImpl implements DbUpgr throw new CloudRuntimeException(errMsg, e); } } + + private void checkAndUpdateAffinityGroupNameCharSetToUtf8mb4(Connection conn) { + logger.debug("Check and update char set for affinity group name to utf8mb4"); + try { + PreparedStatement pstmt = conn.prepareStatement("SELECT MAX(LENGTH(name)) FROM `cloud`.`affinity_group`"); + ResultSet rs = pstmt.executeQuery(); + if (rs.next()) { + long maxLengthOfName = rs.getLong(1); + if (maxLengthOfName <= MAX_INDEXED_CHARS_IN_CHAR_SET_UTF8MB4) { + pstmt = conn.prepareStatement(String.format("ALTER TABLE `cloud`.`affinity_group` MODIFY `name` VARCHAR(%d) CHARACTER SET utf8mb4 NOT NULL", MAX_INDEXED_CHARS_IN_CHAR_SET_UTF8MB4)); + pstmt.executeUpdate(); + logger.debug("Successfully updated char set for affinity group name to utf8mb4"); + } else { + logger.warn("Unable to update char set for affinity group name, as there are some names with more than " + MAX_INDEXED_CHARS_IN_CHAR_SET_UTF8MB4 + + " chars (max 
supported chars for index)"); + } + } + + if (rs != null && !rs.isClosed()) { + rs.close(); + } + if (pstmt != null && !pstmt.isClosed()) { + pstmt.close(); + } + } catch (final SQLException e) { + logger.warn("Exception while updating char set for affinity group name to utf8mb4: " + e.getMessage()); + } + } } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageDaoImpl.java index 2335043b7c5..fb925a28513 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageDaoImpl.java @@ -29,6 +29,7 @@ import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; @@ -538,21 +539,20 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void removeOldUsageRecords(int days) { - String sql = DELETE_ALL_BY_INTERVAL; - TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); - PreparedStatement pstmt = null; - try { - txn.start(); - pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setLong(1, days); - pstmt.executeUpdate(); - txn.commit(); - } catch (Exception ex) { - txn.rollback(); - logger.error("error removing old cloud_usage records for interval: " + days); - } finally { - txn.close(); - } + Transaction.execute(TransactionLegacy.USAGE_DB, new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + PreparedStatement pstmt = null; + try { + pstmt = txn.prepareAutoCloseStatement(DELETE_ALL_BY_INTERVAL); + pstmt.setLong(1, days); + 
pstmt.executeUpdate(); + } catch (Exception ex) { + logger.error("error removing old cloud_usage records for interval: " + days); + } + } + }); } public UsageVO persistUsage(final UsageVO usage) { diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java index f4534ee41ee..514433e8068 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDao.java @@ -34,4 +34,6 @@ public interface AccountDetailsDao extends GenericDao { * they will get created */ void update(long accountId, Map details); + + String getActualValue(AccountDetailVO accountDetailVO); } diff --git a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java index 5451192fc6d..510270ad7bf 100644 --- a/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/AccountDetailsDaoImpl.java @@ -26,20 +26,21 @@ import javax.inject.Inject; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import com.cloud.domain.DomainDetailVO; import com.cloud.domain.DomainVO; -import com.cloud.domain.dao.DomainDetailsDao; import com.cloud.domain.dao.DomainDao; +import com.cloud.domain.dao.DomainDetailsDao; import com.cloud.user.dao.AccountDao; - +import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; 
-import org.apache.cloudstack.framework.config.dao.ConfigurationDao; public class AccountDetailsDaoImpl extends GenericDaoBase implements AccountDetailsDao, ScopedConfigStorage { protected final SearchBuilder accountSearch; @@ -116,10 +117,10 @@ public class AccountDetailsDaoImpl extends GenericDaoBase } @Override - public String getConfigValue(long id, ConfigKey key) { + public String getConfigValue(long id, String key) { // check if account level setting is configured - AccountDetailVO vo = findDetail(id, key.key()); - String value = vo == null ? null : vo.getValue(); + AccountDetailVO vo = findDetail(id, key); + String value = vo == null ? null : getActualValue(vo); if (value != null) { return value; } @@ -138,9 +139,9 @@ public class AccountDetailsDaoImpl extends GenericDaoBase if (account.isPresent()) { DomainVO domain = _domainDao.findById(account.get().getDomainId()); while (domain != null) { - DomainDetailVO domainVO = _domainDetailsDao.findDetail(domain.getId(), key.key()); + DomainDetailVO domainVO = _domainDetailsDao.findDetail(domain.getId(), key); if (domainVO != null) { - value = domainVO.getValue(); + value = _domainDetailsDao.getActualValue(domainVO); break; } else if (domain.getParent() != null) { domain = _domainDao.findById(domain.getParent()); @@ -152,4 +153,13 @@ public class AccountDetailsDaoImpl extends GenericDaoBase } return value; } + + @Override + public String getActualValue(AccountDetailVO accountDetailVO) { + ConfigurationVO configurationVO = _configDao.findByName(accountDetailVO.getName()); + if (configurationVO != null && configurationVO.isEncrypted()) { + return DBEncryptionUtil.decrypt(accountDetailVO.getValue()); + } + return accountDetailVO.getValue(); + } } diff --git a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java index c18ca53f7ab..1da7d52a366 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java +++ 
b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java @@ -17,6 +17,7 @@ package com.cloud.user; import java.util.Date; +import java.util.HashMap; import java.util.Map; import javax.persistence.Column; @@ -361,6 +362,9 @@ public class UserAccountVO implements UserAccount, InternalIdentity { @Override public Map getDetails() { + if (details == null) { + details = new HashMap<>(); + } return details; } diff --git a/engine/schema/src/main/java/com/cloud/vm/ItWorkDao.java b/engine/schema/src/main/java/com/cloud/vm/ItWorkDao.java index 2d4a5e138fe..ab07d6989fa 100644 --- a/engine/schema/src/main/java/com/cloud/vm/ItWorkDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/ItWorkDao.java @@ -41,5 +41,6 @@ public interface ItWorkDao extends GenericDao { boolean updateStep(ItWorkVO work, Step step); List listWorkInProgressFor(long nodeId); + int expungeByVmList(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/vm/ItWorkDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/ItWorkDaoImpl.java index ff727904dcb..0cc0a084443 100644 --- a/engine/schema/src/main/java/com/cloud/vm/ItWorkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/ItWorkDaoImpl.java @@ -18,7 +18,7 @@ package com.cloud.vm; import java.util.List; - +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -103,4 +103,16 @@ public class ItWorkDaoImpl extends GenericDaoBase implements I return search(sc, null); } + + @Override + public int expungeByVmList(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java 
b/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java index ce0bd2d5717..ce3a9a84a34 100644 --- a/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java @@ -148,6 +148,7 @@ public class UserVmVO extends VMInstanceVO implements UserVm { return updateParameters; } + @Override public String getUserVmType() { return userVmType; } diff --git a/engine/schema/src/main/java/com/cloud/vm/VMInstanceVO.java b/engine/schema/src/main/java/com/cloud/vm/VMInstanceVO.java index a1600e04350..a1d9f4a8089 100644 --- a/engine/schema/src/main/java/com/cloud/vm/VMInstanceVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/VMInstanceVO.java @@ -167,10 +167,8 @@ public class VMInstanceVO implements VirtualMachine, FiniteStateObject details; @@ -542,6 +540,14 @@ public class VMInstanceVO implements VirtualMachine, FiniteStateObject getEntityType() { return VirtualMachine.class; diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDao.java index 71b1aed1938..79158dd13b2 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDao.java @@ -23,6 +23,7 @@ import com.cloud.vm.ConsoleSessionVO; import com.cloud.utils.db.GenericDao; import java.util.Date; +import java.util.List; public interface ConsoleSessionDao extends GenericDao { @@ -33,4 +34,6 @@ public interface ConsoleSessionDao extends GenericDao { int expungeSessionsOlderThanDate(Date date); void acquireSession(String sessionUuid); + + int expungeByVmList(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDaoImpl.java index 8e7e229622e..48709674451 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDaoImpl.java +++ 
b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleSessionDaoImpl.java @@ -20,6 +20,9 @@ package com.cloud.vm.dao; import java.util.Date; +import java.util.List; + +import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; @@ -65,5 +68,15 @@ public class ConsoleSessionDaoImpl extends GenericDaoBase vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicDao.java index 23c26ea0718..d34b03c4cb0 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicDao.java @@ -100,4 +100,5 @@ public interface NicDao extends GenericDao { NicVO findByIpAddressAndVmType(String ip, VirtualMachine.Type vmType); List listByNetworkIdAndType(long networkId, VirtualMachine.Type vmType); + List searchRemovedByVms(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicDaoImpl.java index 3eee1d4e749..7d1af1982ae 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicDaoImpl.java @@ -17,11 +17,13 @@ package com.cloud.vm.dao; import java.net.URI; +import java.util.ArrayList; import java.util.List; import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.utils.db.Filter; @@ -428,4 +430,18 @@ public class NicDaoImpl extends GenericDaoBase implements NicDao { 
sc.setParameters("vmType", vmType); return listBy(sc); } + + @Override + public List searchRemovedByVms(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return new ArrayList<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.NNULL); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + Filter filter = new Filter(NicVO.class, "id", true, 0L, batchSize); + return searchIncludingRemoved(sc, filter, null, false); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicExtraDhcpOptionDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicExtraDhcpOptionDao.java index 69d9c00e1e0..7bae64a6acb 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicExtraDhcpOptionDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicExtraDhcpOptionDao.java @@ -29,4 +29,5 @@ public interface NicExtraDhcpOptionDao extends GenericDao extraDhcpOptions); + int expungeByNicList(List nicIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicExtraDhcpOptionDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicExtraDhcpOptionDaoImpl.java index 3056c73938e..0f3679d66a3 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicExtraDhcpOptionDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicExtraDhcpOptionDaoImpl.java @@ -16,13 +16,13 @@ // under the License. 
package com.cloud.vm.dao; -import org.springframework.stereotype.Component; - import java.util.List; +import org.apache.commons.collections.CollectionUtils; +import org.springframework.stereotype.Component; + import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; - import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.NicExtraDhcpOption; @@ -74,4 +74,15 @@ public class NicExtraDhcpOptionDaoImpl extends GenericDaoBase nicIds, Long batchSize) { + if (CollectionUtils.isEmpty(nicIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("nicIds", sb.entity().getNicId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("nicIds", nicIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpDao.java index cbb52e57282..ff7089ca427 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpDao.java @@ -55,4 +55,5 @@ public interface NicSecondaryIpDao extends GenericDao { List listSecondaryIpUsingKeyword(long nicId, String keyword); int moveSecondaryIps(long fromNicId, long toNicId); + int expungeByVmList(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpDaoImpl.java index a56d35d5a63..563b3279520 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicSecondaryIpDaoImpl.java @@ -19,6 +19,7 @@ package com.cloud.vm.dao; import java.util.ArrayList; import java.util.List; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Component; @@ -192,4 
+193,16 @@ public class NicSecondaryIpDaoImpl extends GenericDaoBase vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getVmId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDao.java index 39c65866658..7de543e69d3 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDao.java @@ -53,7 +53,11 @@ public interface UserVmDao extends GenericDao { * @param hostName TODO * @param instanceName */ - void updateVM(long id, String displayName, boolean enable, Long osTypeId, String userData, Long userDataId, String userDataDetails, boolean displayVm, boolean isDynamicallyScalable, String customId, String hostName, String instanceName); + void updateVM(long id, String displayName, boolean enable, Long osTypeId, + String userData, Long userDataId, String userDataDetails, + boolean displayVm, boolean isDynamicallyScalable, + boolean deleteProtection, String customId, String hostName, + String instanceName); List findDestroyedVms(Date date); diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java index 536779125e2..cc8b9fc59a8 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java @@ -274,8 +274,11 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use } @Override - public void updateVM(long id, String displayName, boolean enable, Long osTypeId, String userData, Long userDataId, String userDataDetails, boolean displayVm, - boolean isDynamicallyScalable, String customId, String hostName, 
String instanceName) { + public void updateVM(long id, String displayName, boolean enable, Long osTypeId, + String userData, Long userDataId, String userDataDetails, + boolean displayVm, boolean isDynamicallyScalable, + boolean deleteProtection, String customId, String hostName, + String instanceName) { UserVmVO vo = createForUpdate(); vo.setDisplayName(displayName); vo.setHaEnabled(enable); @@ -285,6 +288,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use vo.setUserDataDetails(userDataDetails); vo.setDisplayVm(displayVm); vo.setDynamicallyScalable(isDynamicallyScalable); + vo.setDeleteProtection(deleteProtection); if (hostName != null) { vo.setHostName(hostName); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 42c00231aac..52bc5aac7e2 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -165,4 +165,9 @@ public interface VMInstanceDao extends GenericDao, StateDao< void updateSystemVmTemplateId(long templateId, Hypervisor.HypervisorType hypervisorType); List listByHostOrLastHostOrHostPod(List hostIds, long podId); + + List searchRemovedByRemoveDate(final Date startDate, final Date endDate, final Long batchSize, + List skippedVmIds); + + Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index b7b787b0045..744518ba743 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -24,21 +24,26 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import javax.annotation.PostConstruct; import 
javax.inject.Inject; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.server.ResourceTag.ResourceObjectType; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.utils.DateUtil; import com.cloud.utils.Pair; import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; @@ -95,11 +100,16 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder NotMigratingSearch; protected SearchBuilder BackupSearch; protected SearchBuilder LastHostAndStatesSearch; + protected SearchBuilder VmsNotInClusterUsingPool; @Inject - ResourceTagDao _tagsDao; + ResourceTagDao tagsDao; @Inject - NicDao _nicDao; + NicDao nicDao; + @Inject + VolumeDao volumeDao; + @Inject + HostDao hostDao; protected Attribute _updateTimeAttr; @@ -276,7 +286,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem _updateTimeAttr = _allAttributes.get("updateTime"); assert _updateTimeAttr != null : "Couldn't get this updateTime attribute"; - SearchBuilder nicSearch = _nicDao.createSearchBuilder(); + SearchBuilder nicSearch = nicDao.createSearchBuilder(); nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); nicSearch.and("removedNic", nicSearch.entity().getRemoved(), SearchCriteria.Op.NULL); @@ -305,6 +315,16 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem LastHostAndStatesSearch.and("states", LastHostAndStatesSearch.entity().getState(), Op.IN); LastHostAndStatesSearch.done(); + VmsNotInClusterUsingPool = createSearchBuilder(); + SearchBuilder volumeSearch = volumeDao.createSearchBuilder(); + 
volumeSearch.and("poolId", volumeSearch.entity().getPoolId(), Op.EQ); + volumeSearch.and("removed", volumeSearch.entity().getRemoved(), Op.NULL); + VmsNotInClusterUsingPool.join("volumeSearch", volumeSearch, volumeSearch.entity().getInstanceId(), VmsNotInClusterUsingPool.entity().getId(), JoinType.INNER); + SearchBuilder hostSearch2 = hostDao.createSearchBuilder(); + hostSearch2.and("clusterId", hostSearch2.entity().getClusterId(), SearchCriteria.Op.NEQ); + VmsNotInClusterUsingPool.join("hostSearch2", hostSearch2, hostSearch2.entity().getId(), VmsNotInClusterUsingPool.entity().getHostId(), JoinType.INNER); + VmsNotInClusterUsingPool.and("vmStates", VmsNotInClusterUsingPool.entity().getState(), Op.IN); + VmsNotInClusterUsingPool.done(); } @Override @@ -834,8 +854,9 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem public List listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... types) { if (NetworkTypeSearch == null) { - SearchBuilder nicSearch = _nicDao.createSearchBuilder(); + SearchBuilder nicSearch = nicDao.createSearchBuilder(); nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); + nicSearch.and("removed", nicSearch.entity().getRemoved(), SearchCriteria.Op.NULL); NetworkTypeSearch = createSearchBuilder(); NetworkTypeSearch.and("types", NetworkTypeSearch.entity().getType(), SearchCriteria.Op.IN); @@ -871,7 +892,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem txn.start(); VMInstanceVO vm = findById(id); if (vm != null && vm.getType() == Type.User) { - _tagsDao.removeByIdAndType(id, ResourceObjectType.UserVm); + tagsDao.removeByIdAndType(id, ResourceObjectType.UserVm); } boolean result = super.remove(id); txn.commit(); @@ -1016,4 +1037,36 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("podId", String.valueOf(podId)); return listBy(sc); } + + @Override + public List searchRemovedByRemoveDate(Date startDate, Date endDate, Long batchSize, + List 
skippedVmIds) { + SearchBuilder sb = createSearchBuilder(); + sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.NNULL); + sb.and("startDate", sb.entity().getRemoved(), SearchCriteria.Op.GTEQ); + sb.and("endDate", sb.entity().getRemoved(), SearchCriteria.Op.LTEQ); + sb.and("skippedVmIds", sb.entity().getId(), Op.NOTIN); + SearchCriteria sc = sb.create(); + if (startDate != null) { + sc.setParameters("startDate", startDate); + } + if (endDate != null) { + sc.setParameters("endDate", endDate); + } + if (CollectionUtils.isNotEmpty(skippedVmIds)) { + sc.setParameters("skippedVmIds", skippedVmIds.toArray()); + } + Filter filter = new Filter(VMInstanceVO.class, "id", true, 0L, batchSize); + return searchIncludingRemoved(sc, filter, null, false); + } + + public Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId) { + SearchCriteria sc = VmsNotInClusterUsingPool.create(); + sc.setParameters("vmStates", State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring); + sc.setJoinParameters("volumeSearch", "poolId", poolId); + sc.setJoinParameters("hostSearch2", "clusterId", clusterId); + List vms = search(sc, null); + List uniqueVms = vms.stream().distinct().collect(Collectors.toList()); + return new Pair<>(uniqueVms, uniqueVms.size()); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDao.java index 879faaf5c90..0d7aa703a8c 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDao.java @@ -75,8 +75,10 @@ public interface VmStatsDao extends GenericDao { /** * Removes (expunges) all VM stats with {@code timestamp} less than * a given Date. - * @param limit the maximum date to keep stored. Records that exceed this limit will be removed. + * @param limitDate the maximum date to keep stored. Records that exceed this limit will be removed. 
+ * @param limitPerQuery the maximum amount of rows to be removed in a single query. We loop if there are still rows to be removed after a given query. + * If 0 or negative, no limit is used. */ - void removeAllByTimestampLessThan(Date limit); + void removeAllByTimestampLessThan(Date limitDate, long limitPerQuery); } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDaoImpl.java index 1bef8f0626c..aa58e489364 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDaoImpl.java @@ -21,6 +21,8 @@ import java.util.List; import javax.annotation.PostConstruct; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.Filter; @@ -33,6 +35,8 @@ import com.cloud.vm.VmStatsVO; @Component public class VmStatsDaoImpl extends GenericDaoBase implements VmStatsDao { + protected Logger logger = LogManager.getLogger(getClass()); + protected SearchBuilder vmIdSearch; protected SearchBuilder vmIdTimestampGreaterThanEqualSearch; protected SearchBuilder vmIdTimestampLessThanEqualSearch; @@ -113,10 +117,22 @@ public class VmStatsDaoImpl extends GenericDaoBase implements V } @Override - public void removeAllByTimestampLessThan(Date limit) { + public void removeAllByTimestampLessThan(Date limitDate, long limitPerQuery) { SearchCriteria sc = timestampSearch.create(); - sc.setParameters("timestamp", limit); - expunge(sc); + sc.setParameters("timestamp", limitDate); + + logger.debug(String.format("Starting to remove all vm_stats rows older than [%s].", limitDate)); + + long totalRemoved = 0; + long removed; + + do { + removed = expunge(sc, limitPerQuery); + totalRemoved += removed; + logger.trace(String.format("Removed [%s] vm_stats rows on the last update and a sum of [%s] vm_stats rows older than [%s] until now.", removed, 
totalRemoved, limitDate)); + } while (limitPerQuery > 0 && removed >= limitPerQuery); + + logger.info(String.format("Removed a total of [%s] vm_stats rows older than [%s].", totalRemoved, limitDate)); } } diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDao.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDao.java index 31999ef15d6..0143aaa1e73 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDao.java @@ -38,4 +38,6 @@ public interface VMSnapshotDao extends GenericDao, StateDao< VMSnapshotVO findByName(Long vmId, String name); List listByAccountId(Long accountId); + List searchByVms(List vmIds); + List searchRemovedByVms(List vmIds, Long batchSize); } diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java index 062960130ac..ab8f5f2cd84 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java @@ -17,12 +17,14 @@ package com.cloud.vm.snapshot.dao; +import java.util.ArrayList; import java.util.Date; import java.util.List; - +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -180,4 +182,29 @@ public class VMSnapshotDaoImpl extends GenericDaoBase implem return rows > 0; } + @Override + public List searchByVms(List vmIds) { + if (CollectionUtils.isEmpty(vmIds)) { + return new ArrayList<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getVmId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", 
vmIds.toArray()); + return search(sc, null); + } + + @Override + public List searchRemovedByVms(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return new ArrayList<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getVmId(), SearchCriteria.Op.IN); + sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.NNULL); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + Filter filter = new Filter(VMSnapshotVO.class, "id", true, 0L, batchSize); + return searchIncludingRemoved(sc, filter, null, false); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java index d4647255fc6..084df29fa42 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/RoleVO.java @@ -58,11 +58,16 @@ public class RoleVO implements Role { @Column(name = "public_role") private boolean publicRole = true; + @Column(name = "state") + @Enumerated(value = EnumType.STRING) + private State state; + @Column(name = GenericDao.REMOVED_COLUMN) private Date removed; public RoleVO() { this.uuid = UUID.randomUUID().toString(); + this.state = State.ENABLED; } public RoleVO(final String name, final RoleType roleType, final String description) { @@ -131,4 +136,12 @@ public class RoleVO implements Role { public void setPublicRole(boolean publicRole) { this.publicRole = publicRole; } + + public State getState() { + return state; + } + + public void setState(State state) { + this.state = state; + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RoleDao.java b/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RoleDao.java index 2d4151afc7d..f4fdb6a2b16 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RoleDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RoleDao.java @@ -28,15 
+28,15 @@ import java.util.List; public interface RoleDao extends GenericDao { List findAllByName(String roleName, boolean showPrivateRole); - Pair, Integer> findAllByName(final String roleName, String keyword, Long offset, Long limit, boolean showPrivateRole); + Pair, Integer> findAllByName(final String roleName, String keyword, String state, Long offset, Long limit, boolean showPrivateRole); List findAllByRoleType(RoleType type, boolean showPrivateRole); List findByName(String roleName, boolean showPrivateRole); RoleVO findByNameAndType(String roleName, RoleType type, boolean showPrivateRole); - Pair, Integer> findAllByRoleType(RoleType type, Long offset, Long limit, boolean showPrivateRole); + Pair, Integer> findAllByRoleType(RoleType type, String state, Long offset, Long limit, boolean showPrivateRole); - Pair, Integer> listAllRoles(Long startIndex, Long limit, boolean showPrivateRole); + Pair, Integer> listAllRoles(String state, Long startIndex, Long limit, boolean showPrivateRole); List searchByIds(Long... 
ids); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RoleDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RoleDaoImpl.java index 2e8fdd5fcc2..48c0d828a41 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RoleDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RoleDaoImpl.java @@ -50,11 +50,13 @@ public class RoleDaoImpl extends GenericDaoBase implements RoleDao RoleByNameSearch = createSearchBuilder(); RoleByNameSearch.and("roleName", RoleByNameSearch.entity().getName(), SearchCriteria.Op.LIKE); RoleByNameSearch.and("isPublicRole", RoleByNameSearch.entity().isPublicRole(), SearchCriteria.Op.EQ); + RoleByNameSearch.and("state", RoleByNameSearch.entity().getState(), SearchCriteria.Op.EQ); RoleByNameSearch.done(); RoleByTypeSearch = createSearchBuilder(); RoleByTypeSearch.and("roleType", RoleByTypeSearch.entity().getRoleType(), SearchCriteria.Op.EQ); RoleByTypeSearch.and("isPublicRole", RoleByTypeSearch.entity().isPublicRole(), SearchCriteria.Op.EQ); + RoleByTypeSearch.and("state", RoleByTypeSearch.entity().getState(), SearchCriteria.Op.EQ); RoleByTypeSearch.done(); RoleByNameAndTypeSearch = createSearchBuilder(); @@ -65,16 +67,17 @@ public class RoleDaoImpl extends GenericDaoBase implements RoleDao RoleByIsPublicSearch = createSearchBuilder(); RoleByIsPublicSearch.and("isPublicRole", RoleByIsPublicSearch.entity().isPublicRole(), SearchCriteria.Op.EQ); + RoleByIsPublicSearch.and("state", RoleByIsPublicSearch.entity().getState(), SearchCriteria.Op.EQ); RoleByIsPublicSearch.done(); } @Override public List findAllByName(final String roleName, boolean showPrivateRole) { - return findAllByName(roleName, null, null, null, showPrivateRole).first(); + return findAllByName(roleName, null, null, null, null, showPrivateRole).first(); } @Override - public Pair, Integer> findAllByName(final String roleName, String keyword, Long offset, Long limit, boolean showPrivateRole) { + public 
Pair, Integer> findAllByName(final String roleName, String keyword, String state, Long offset, Long limit, boolean showPrivateRole) { SearchCriteria sc = RoleByNameSearch.create(); filterPrivateRolesIfNeeded(sc, showPrivateRole); if (StringUtils.isNotEmpty(roleName)) { @@ -83,19 +86,25 @@ public class RoleDaoImpl extends GenericDaoBase implements RoleDao if (StringUtils.isNotEmpty(keyword)) { sc.setParameters("roleName", "%" + keyword + "%"); } + if (StringUtils.isNotEmpty(state)) { + sc.setParameters("state", state); + } return searchAndCount(sc, new Filter(RoleVO.class, "id", true, offset, limit)); } @Override public List findAllByRoleType(final RoleType type, boolean showPrivateRole) { - return findAllByRoleType(type, null, null, showPrivateRole).first(); + return findAllByRoleType(type, null, null, null, showPrivateRole).first(); } - public Pair, Integer> findAllByRoleType(final RoleType type, Long offset, Long limit, boolean showPrivateRole) { + public Pair, Integer> findAllByRoleType(final RoleType type, String state, Long offset, Long limit, boolean showPrivateRole) { SearchCriteria sc = RoleByTypeSearch.create(); filterPrivateRolesIfNeeded(sc, showPrivateRole); sc.setParameters("roleType", type); + if (StringUtils.isNotEmpty(state)) { + sc.setParameters("state", state); + } return searchAndCount(sc, new Filter(RoleVO.class, "id", true, offset, limit)); } @@ -117,8 +126,11 @@ public class RoleDaoImpl extends GenericDaoBase implements RoleDao } @Override - public Pair, Integer> listAllRoles(Long startIndex, Long limit, boolean showPrivateRole) { + public Pair, Integer> listAllRoles(String state, Long startIndex, Long limit, boolean showPrivateRole) { SearchCriteria sc = RoleByIsPublicSearch.create(); + if (StringUtils.isNotEmpty(state)) { + sc.setParameters("state", state); + } filterPrivateRolesIfNeeded(sc, showPrivateRole); return searchAndCount(sc, new Filter(RoleVO.class, "id", true, startIndex, limit)); } diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDao.java b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDao.java index 010720ba33a..859b2921504 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDao.java @@ -38,4 +38,7 @@ public interface AffinityGroupDao extends GenericDao { AffinityGroupVO findByAccountAndType(Long accountId, String string); AffinityGroupVO findDomainLevelGroupByType(Long domainId, String string); + + List listByIds(List ids, boolean exclusive); + } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDaoImpl.java index 3bd7c6d082b..5bd598f36a0 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDaoImpl.java @@ -31,6 +31,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; public class AffinityGroupDaoImpl extends GenericDaoBase implements AffinityGroupDao { + private SearchBuilder IdsSearch; private SearchBuilder AccountIdSearch; private SearchBuilder AccountIdNameSearch; private SearchBuilder AccountIdNamesSearch; @@ -47,6 +48,10 @@ public class AffinityGroupDaoImpl extends GenericDaoBase @PostConstruct protected void init() { + IdsSearch = createSearchBuilder(); + IdsSearch.and("idIn", IdsSearch.entity().getId(), SearchCriteria.Op.IN); + IdsSearch.done(); + AccountIdSearch = createSearchBuilder(); AccountIdSearch.and("accountId", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); AccountIdSearch.done(); @@ -158,4 +163,11 @@ public class AffinityGroupDaoImpl extends GenericDaoBase sc.setJoinParameters("domainTypeSearch", "domainId", domainId); return 
findOneBy(sc); } + + @Override + public List listByIds(List ids, boolean exclusive) { + SearchCriteria sc = IdsSearch.create(); + sc.setParameters("idIn", ids.toArray()); + return lockRows(sc, null, exclusive); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupRepositoryVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupRepositoryVO.java new file mode 100644 index 00000000000..e8364520ed0 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupRepositoryVO.java @@ -0,0 +1,155 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.backup; + +import com.cloud.utils.db.Encrypt; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +@Entity +@Table(name = "backup_repository") +public class BackupRepositoryVO implements BackupRepository { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "name") + private String name; + + @Column(name = "zone_id", nullable = false) + private long zoneId; + + @Column(name = "provider", nullable = false) + private String provider; + + @Column(name = "type", nullable = false) + private String type; + + @Column(name = "address", nullable = false) + private String address; + + @Encrypt + @Column(name = "mount_opts") + private String mountOptions; + + @Column(name = "used_bytes",nullable = true) + private Long usedBytes; + + @Column(name = "capacity_bytes", nullable = true) + private Long capacityBytes; + + @Column(name = "created") + @Temporal(value = TemporalType.TIMESTAMP) + private Date created; + + @Column(name = "removed") + @Temporal(value = TemporalType.TIMESTAMP) + private Date removed; + + public BackupRepositoryVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public BackupRepositoryVO(final long zoneId, final String provider, final String name, final String type, final String address, final String mountOptions, final Long capacityBytes) { + this(); + this.zoneId = zoneId; + this.provider = provider; + this.name = name; + this.type = type; + this.address = address; + this.mountOptions = mountOptions; + this.capacityBytes = capacityBytes; + this.created = new Date(); + } + + public String getUuid() { + return 
uuid; + } + + public long getId() { + return id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public long getZoneId() { + return zoneId; + } + + @Override + public String getProvider() { + return provider; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public void setAddress(String address) { + this.address = address; + } + + @Override + public String getAddress() { + return address; + } + + @Override + public String getMountOptions() { + return mountOptions; + } + + @Override + public Long getUsedBytes() { + return usedBytes; + } + + @Override + public Long getCapacityBytes() { + return capacityBytes; + } + + public Date getCreated() { + return created; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java index 3e5db0443d8..9b285e66cab 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java @@ -18,8 +18,13 @@ package org.apache.cloudstack.backup; import com.cloud.utils.db.GenericDao; +import com.google.gson.Gson; +import org.apache.commons.lang3.StringUtils; +import java.util.Arrays; +import java.util.Collections; import java.util.Date; +import java.util.List; import java.util.UUID; import javax.persistence.Column; @@ -82,6 +87,9 @@ public class BackupVO implements Backup { @Column(name = "zone_id") private long zoneId; + @Column(name = "backed_volumes", length = 65535) + protected String backedUpVolumes; + public BackupVO() { this.uuid = UUID.randomUUID().toString(); } @@ -158,6 +166,7 @@ public class BackupVO implements Backup { this.status = status; } + @Override public long getBackupOfferingId() { return backupOfferingId; } @@ -202,6 +211,17 @@ public class BackupVO implements Backup { 
return null; } + public List getBackedUpVolumes() { + if (StringUtils.isEmpty(this.backedUpVolumes)) { + return Collections.emptyList(); + } + return Arrays.asList(new Gson().fromJson(this.backedUpVolumes, Backup.VolumeInfo[].class)); + } + + public void setBackedUpVolumes(String backedUpVolumes) { + this.backedUpVolumes = backedUpVolumes; + } + public Date getRemoved() { return removed; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java index 5d2f5ac64d6..89a13245b0a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDao.java @@ -32,9 +32,8 @@ public interface BackupDao extends GenericDao { List listByVmId(Long zoneId, Long vmId); List listByAccountId(Long accountId); - List listByOfferingId(Long offeringId); List syncBackups(Long zoneId, Long vmId, List externalBackups); BackupVO getBackupVO(Backup backup); - + List listByOfferingId(Long backupOfferingId); BackupResponse newBackupResponse(Backup backup); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java index fefbb68ae77..5a9cd062037 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.backup.dao; import java.util.ArrayList; import java.util.List; +import java.util.Objects; import javax.annotation.PostConstruct; import javax.inject.Inject; @@ -68,6 +69,8 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac backupSearch = createSearchBuilder(); backupSearch.and("vm_id", backupSearch.entity().getVmId(), SearchCriteria.Op.EQ); backupSearch.and("external_id", 
backupSearch.entity().getExternalId(), SearchCriteria.Op.EQ); + backupSearch.and("backup_offering_id", backupSearch.entity().getBackupOfferingId(), SearchCriteria.Op.EQ); + backupSearch.and("zone_id", backupSearch.entity().getZoneId(), SearchCriteria.Op.EQ); backupSearch.done(); } @@ -102,13 +105,6 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac return new ArrayList<>(listBy(sc)); } - @Override - public List listByOfferingId(Long offeringId) { - SearchCriteria sc = backupSearch.create(); - sc.setParameters("offering_id", offeringId); - return new ArrayList<>(listBy(sc)); - } - private Backup findByExternalId(Long zoneId, String externalId) { SearchCriteria sc = backupSearch.create(); sc.setParameters("external_id", externalId); @@ -123,6 +119,13 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac return backupVO; } + @Override + public List listByOfferingId(Long backupOfferingId) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters("backup_offering_id", backupOfferingId); + return new ArrayList<>(listBy(sc)); + } + public void removeExistingBackups(Long zoneId, Long vmId) { SearchCriteria sc = backupSearch.create(); sc.setParameters("vm_id", vmId); @@ -145,7 +148,11 @@ public class BackupDaoImpl extends GenericDaoBase implements Bac AccountVO account = accountDao.findByIdIncludingRemoved(vm.getAccountId()); DomainVO domain = domainDao.findByIdIncludingRemoved(vm.getDomainId()); DataCenterVO zone = dataCenterDao.findByIdIncludingRemoved(vm.getDataCenterId()); - BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(vm.getBackupOfferingId()); + Long offeringId = backup.getBackupOfferingId(); + if (offeringId == null) { + offeringId = vm.getBackupOfferingId(); + } + BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(offeringId); BackupResponse response = new BackupResponse(); response.setId(backup.getUuid()); @@ -157,7 +164,14 @@ public class BackupDaoImpl extends GenericDaoBase 
implements Bac response.setSize(backup.getSize()); response.setProtectedSize(backup.getProtectedSize()); response.setStatus(backup.getStatus()); - response.setVolumes(new Gson().toJson(vm.getBackupVolumeList().toArray(), Backup.VolumeInfo[].class)); + // ACS 4.20: For backups taken prior this release the backup.backed_volumes column would be empty hence use vm_instance.backup_volumes + String backedUpVolumes; + if (Objects.isNull(backup.getBackedUpVolumes())) { + backedUpVolumes = new Gson().toJson(vm.getBackupVolumeList().toArray(), Backup.VolumeInfo[].class); + } else { + backedUpVolumes = new Gson().toJson(backup.getBackedUpVolumes().toArray(), Backup.VolumeInfo[].class); + } + response.setVolumes(backedUpVolumes); response.setBackupOfferingId(offering.getUuid()); response.setBackupOffering(offering.getName()); response.setAccountId(account.getUuid()); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDao.java new file mode 100644 index 00000000000..0034bfb30ab --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDao.java @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.backup.dao; + +import java.util.List; + +import org.apache.cloudstack.backup.BackupRepository; +import org.apache.cloudstack.backup.BackupRepositoryVO; + +import com.cloud.utils.db.GenericDao; + +public interface BackupRepositoryDao extends GenericDao { + List listByZoneAndProvider(Long zoneId, String provider); + + BackupRepository findByBackupOfferingId(Long backupOfferingId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDaoImpl.java new file mode 100644 index 00000000000..460b6d8aba4 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupRepositoryDaoImpl.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.backup.dao; + +import java.util.ArrayList; +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.inject.Inject; + +import org.apache.cloudstack.backup.BackupOfferingVO; +import org.apache.cloudstack.backup.BackupRepository; +import org.apache.cloudstack.backup.BackupRepositoryVO; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +public class BackupRepositoryDaoImpl extends GenericDaoBase implements BackupRepositoryDao { + @Inject + BackupOfferingDao backupOfferingDao; + + private SearchBuilder backupRepoSearch; + + public BackupRepositoryDaoImpl() { + } + + @PostConstruct + protected void init() { + backupRepoSearch = createSearchBuilder(); + backupRepoSearch.and("zone_id", backupRepoSearch.entity().getZoneId(), SearchCriteria.Op.EQ); + backupRepoSearch.and("provider", backupRepoSearch.entity().getProvider(), SearchCriteria.Op.EQ); + backupRepoSearch.done(); + } + + @Override + public List listByZoneAndProvider(Long zoneId, String provider) { + SearchCriteria sc = backupRepoSearch.create(); + sc.setParameters("zone_id", zoneId); + sc.setParameters("provider", provider); + return new ArrayList<>(listBy(sc)); + } + + @Override + public BackupRepository findByBackupOfferingId(Long backupOfferingId) { + BackupOfferingVO offering = backupOfferingDao.findByIdIncludingRemoved(backupOfferingId); + if (offering == null) { + return null; + } + return findByUuid(offering.getExternalId()); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDao.java index 516b0112c98..ee1783a9c89 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDao.java @@ -20,6 +20,7 @@ package 
org.apache.cloudstack.backup.dao; import java.util.Date; import java.util.List; +import com.cloud.utils.DateUtil; import org.apache.cloudstack.api.response.BackupScheduleResponse; import org.apache.cloudstack.backup.BackupSchedule; import org.apache.cloudstack.backup.BackupScheduleVO; @@ -29,6 +30,10 @@ import com.cloud.utils.db.GenericDao; public interface BackupScheduleDao extends GenericDao { BackupScheduleVO findByVM(Long vmId); + List listByVM(Long vmId); + + BackupScheduleVO findByVMAndIntervalType(Long vmId, DateUtil.IntervalType intervalType); + List getSchedulesToExecute(Date currentTimestamp); BackupScheduleResponse newBackupScheduleResponse(BackupSchedule schedule); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java index 7a58679e7e5..e00ccc5abd7 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java @@ -23,6 +23,7 @@ import java.util.List; import javax.annotation.PostConstruct; import javax.inject.Inject; +import com.cloud.utils.DateUtil; import org.apache.cloudstack.api.response.BackupScheduleResponse; import org.apache.cloudstack.backup.BackupSchedule; import org.apache.cloudstack.backup.BackupScheduleVO; @@ -49,6 +50,7 @@ public class BackupScheduleDaoImpl extends GenericDaoBase listByVM(Long vmId) { + SearchCriteria sc = backupScheduleSearch.create(); + sc.setParameters("vm_id", vmId); + return listBy(sc, null); + } + + @Override + public BackupScheduleVO findByVMAndIntervalType(Long vmId, DateUtil.IntervalType intervalType) { + SearchCriteria sc = backupScheduleSearch.create(); + sc.setParameters("vm_id", vmId); + sc.setParameters("interval_type", intervalType.ordinal()); + return findOneBy(sc); + } + @Override public List getSchedulesToExecute(Date currentTimestamp) { 
SearchCriteria sc = executableSchedulesSearch.create(); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/datacenter/DataCenterIpv4GuestSubnetVO.java b/engine/schema/src/main/java/org/apache/cloudstack/datacenter/DataCenterIpv4GuestSubnetVO.java new file mode 100644 index 00000000000..828e7b39e9a --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/datacenter/DataCenterIpv4GuestSubnetVO.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.datacenter; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "dc_ip4_guest_subnets") +public class DataCenterIpv4GuestSubnetVO implements DataCenterIpv4GuestSubnet { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + String uuid; + + @Column(name = "data_center_id") + private long dataCenterId; + + @Column(name = "subnet") + private String subnet; + + @Column(name = "domain_id") + Long domainId; + + @Column(name = "account_id") + Long accountId; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + @Column(name= GenericDao.REMOVED_COLUMN) + private Date removed; + + public DataCenterIpv4GuestSubnetVO(long dcId, String subnet) { + this(); + this.dataCenterId = dcId; + this.subnet = subnet; + this.created = new Date(); + } + + protected DataCenterIpv4GuestSubnetVO() { + this.uuid = UUID.randomUUID().toString(); + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public Long getDataCenterId() { + return dataCenterId; + } + + public void setDataCenterId(long dcId) { + this.dataCenterId = dcId; + } + + public String getSubnet() { + return subnet; + } + + public void setSubnet(String subnet) { + this.subnet = subnet; + } + + @Override + public Long getDomainId() { + return domainId; + } + + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + + @Override + public Long getAccountId() { + return accountId; + } + + public void setAccountId(Long accountId) { + this.accountId = accountId; + } + + @Override + public Date getCreated() { + return created; + } + + 
+} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/datacenter/dao/DataCenterIpv4GuestSubnetDao.java b/engine/schema/src/main/java/org/apache/cloudstack/datacenter/dao/DataCenterIpv4GuestSubnetDao.java new file mode 100644 index 00000000000..e231b267fda --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/datacenter/dao/DataCenterIpv4GuestSubnetDao.java @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.datacenter.dao; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnetVO; + +import java.util.List; + +public interface DataCenterIpv4GuestSubnetDao extends GenericDao { + + List listByDataCenterId(long dcId); + List listByDataCenterIdAndAccountId(long dcId, long accountId); + List listByDataCenterIdAndDomainId(long dcId, long domainId); + List listNonDedicatedByDataCenterId(long dcId); + List listByAccountId(long accountId); + List listByDomainId(long domainId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/datacenter/dao/DataCenterIpv4GuestSubnetDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/datacenter/dao/DataCenterIpv4GuestSubnetDaoImpl.java new file mode 100644 index 00000000000..49e8a6ef662 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/datacenter/dao/DataCenterIpv4GuestSubnetDaoImpl.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.datacenter.dao; + +import java.util.List; + +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnetVO; +import org.springframework.stereotype.Component; + +import com.cloud.utils.db.DB; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.QueryBuilder; +import com.cloud.utils.db.SearchCriteria; + +@Component +@DB +public class DataCenterIpv4GuestSubnetDaoImpl extends GenericDaoBase implements DataCenterIpv4GuestSubnetDao { + + public DataCenterIpv4GuestSubnetDaoImpl() { + } + + @Override + public List listByDataCenterId(long dcId) { + QueryBuilder sc = QueryBuilder.create(DataCenterIpv4GuestSubnetVO.class); + sc.and(sc.entity().getDataCenterId(), SearchCriteria.Op.EQ, dcId); + return sc.list(); + } + + @Override + public List listByDataCenterIdAndAccountId(long dcId, long accountId) { + QueryBuilder sc = QueryBuilder.create(DataCenterIpv4GuestSubnetVO.class); + sc.and(sc.entity().getDataCenterId(), SearchCriteria.Op.EQ, dcId); + sc.and(sc.entity().getAccountId(), SearchCriteria.Op.EQ, accountId); + return sc.list(); + } + + @Override + public List listByDataCenterIdAndDomainId(long dcId, long domainId) { + QueryBuilder sc = QueryBuilder.create(DataCenterIpv4GuestSubnetVO.class); + sc.and(sc.entity().getDataCenterId(), SearchCriteria.Op.EQ, dcId); + sc.and(sc.entity().getDomainId(), SearchCriteria.Op.EQ, domainId); + sc.and(sc.entity().getAccountId(), SearchCriteria.Op.NULL); + return sc.list(); + } + + @Override + public List listNonDedicatedByDataCenterId(long dcId) { + QueryBuilder sc = QueryBuilder.create(DataCenterIpv4GuestSubnetVO.class); + sc.and(sc.entity().getDataCenterId(), SearchCriteria.Op.EQ, dcId); + sc.and(sc.entity().getDomainId(), SearchCriteria.Op.NULL); + sc.and(sc.entity().getAccountId(), SearchCriteria.Op.NULL); + return sc.list(); + } + + @Override + public List listByAccountId(long accountId) { + QueryBuilder sc = QueryBuilder.create(DataCenterIpv4GuestSubnetVO.class); + 
sc.and(sc.entity().getAccountId(), SearchCriteria.Op.EQ, accountId); + return sc.list(); + } + + @Override + public List listByDomainId(long domainId) { + QueryBuilder sc = QueryBuilder.create(DataCenterIpv4GuestSubnetVO.class); + sc.and(sc.entity().getDomainId(), SearchCriteria.Op.EQ, domainId); + return sc.list(); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerDetailsVO.java b/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerDetailsVO.java new file mode 100644 index 00000000000..9e337887011 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerDetailsVO.java @@ -0,0 +1,103 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.network; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; + +@Entity +@Table(name = "bgp_peer_details") +public class BgpPeerDetailsVO implements ResourceDetail { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "bgp_peer_id") + private long resourceId; + + @Enumerated(value = EnumType.STRING) + @Column(name = "name") + private BgpPeer.Detail name; + + @Column(name = "value", length = 1024) + private String value; + + @Column(name = "display") + private boolean display; + + public BgpPeerDetailsVO() { + } + + public BgpPeerDetailsVO(long resourceId, BgpPeer.Detail detailName, String value, boolean display) { + this.resourceId = resourceId; + this.name = detailName; + this.value = value; + this.display = display; + } + + @Override + public long getId() { + return id; + } + + @Override + public long getResourceId() { + return resourceId; + } + + public void setResourceId(long resourceId) { + this.resourceId = resourceId; + } + + public String getName() { + return name.name(); + } + + public BgpPeer.Detail getDetailName() { + return name; + } + + public String getValue() { + return value; + } + + @Override + public boolean isDisplay() { + return display; + } + + public void setId(long id) { + this.id = id; + } + + public void setName(BgpPeer.Detail name) { + this.name = name; + } + + public void setValue(String value) { + this.value = value; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerNetworkMapVO.java b/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerNetworkMapVO.java new file mode 100644 index 00000000000..b520ecd5cd1 
--- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerNetworkMapVO.java @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.network; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.api.InternalIdentity; + +import java.util.Date; + +@Entity +@Table(name = "bgp_peer_network_map") +public class BgpPeerNetworkMapVO implements InternalIdentity { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "bgp_peer_id") + private long bgpPeerId; + + @Column(name = "network_id") + private Long networkId; + + @Column(name = "vpc_id") + private Long vpcId; + + @Column(name = "state") + private BgpPeer.State state; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + @Column(name= GenericDao.REMOVED_COLUMN) + private Date removed; + + /** + * There should never be a public constructor for this class. 
Since it's + * only here to define the table for the DAO class. + */ + protected BgpPeerNetworkMapVO() { + } + + public BgpPeerNetworkMapVO(long bgpPeerId, Long networkId, Long vpcId, BgpPeer.State state) { + this.bgpPeerId = bgpPeerId; + this.networkId = networkId; + this.vpcId = vpcId; + this.state = state; + } + + @Override + public long getId() { + return id; + } + + public long getBgpPeerId() { + return bgpPeerId; + } + + public Long getNetworkId() { + return networkId; + } + + public Long getVpcId() { + return vpcId; + } + + public BgpPeer.State getState() { + return state; + } + + public void setState(BgpPeer.State state) { + this.state = state; + } + + public Date getCreated() { + return created; + } + + public Date getRemoved() { + return removed; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerVO.java b/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerVO.java new file mode 100644 index 00000000000..0203b34fb1e --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/BgpPeerVO.java @@ -0,0 +1,170 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.network; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "bgp_peers") +public class BgpPeerVO implements BgpPeer { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "data_center_id") + private long dataCenterId; + + @Column(name = "ip4_address") + private String ip4Address; + + @Column(name = "ip6_address") + private String ip6Address; + + @Column(name = "as_number") + private Long asNumber; + + @Column(name = "password") + private String password; + + @Column(name = "domain_id") + Long domainId; + + @Column(name = "account_id") + Long accountId; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + @Column(name= GenericDao.REMOVED_COLUMN) + private Date removed; + + protected BgpPeerVO() { + uuid = UUID.randomUUID().toString(); + } + + public BgpPeerVO(long dcId, String ip4Address, String ip6Address, Long asNumber, String password) { + this(); + this.dataCenterId = dcId; + this.ip4Address = ip4Address; + this.ip6Address = ip6Address; + this.asNumber = asNumber; + this.password = password; + } + + @Override + public String toString() { + return String.format("BgpPeerVO [%s|%s|%s]", asNumber, ip4Address, ip6Address); + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getDataCenterId() { + return dataCenterId; + } + + public void setDataCenterId(long dataCenterId) { + this.dataCenterId = dataCenterId; + } + + @Override + public String getIp4Address() { + return ip4Address; + } + + public void setIp4Address(String ip4Address) { 
+ this.ip4Address = ip4Address; + } + + @Override + public String getIp6Address() { + return ip6Address; + } + + public void setIp6Address(String ip6Address) { + this.ip6Address = ip6Address; + } + + @Override + public Long getAsNumber() { + return asNumber; + } + + public void setAsNumber(Long asNumber) { + this.asNumber = asNumber; + } + + @Override + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + @Override + public Long getDomainId() { + return domainId; + } + + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + + @Override + public Long getAccountId() { + return accountId; + } + + public void setAccountId(Long accountId) { + this.accountId = accountId; + } + + @Override + public Date getCreated() { + return created; + } + + public Date getRemoved() { + return removed; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/Ipv4GuestSubnetNetworkMapVO.java b/engine/schema/src/main/java/org/apache/cloudstack/network/Ipv4GuestSubnetNetworkMapVO.java new file mode 100644 index 00000000000..cc726ba3d35 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/Ipv4GuestSubnetNetworkMapVO.java @@ -0,0 +1,143 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.network; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "ip4_guest_subnet_network_map") +public class Ipv4GuestSubnetNetworkMapVO implements Ipv4GuestSubnetNetworkMap { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "parent_id") + private Long parentId; + + @Column(name = "subnet") + private String subnet; + + @Column(name = "vpc_id") + private Long vpcId; + + @Column(name = "network_id") + private Long networkId; + + @Column(name = "state") + private State state; + + @Temporal(TemporalType.TIMESTAMP) + @Column(name = "allocated") + Date allocated; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + @Column(name= GenericDao.REMOVED_COLUMN) + private Date removed; + + protected Ipv4GuestSubnetNetworkMapVO() { + uuid = UUID.randomUUID().toString(); + } + + protected Ipv4GuestSubnetNetworkMapVO(Long parentId, String subnet, Long networkId, Ipv4GuestSubnetNetworkMap.State state) { + this.parentId = parentId; + this.subnet = subnet; + this.networkId = networkId; + this.state = state; + uuid = UUID.randomUUID().toString(); + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public Long getParentId() { + return parentId; + } + + @Override + public String getSubnet() { + return subnet; + } + + @Override + public Long getVpcId() { + return vpcId; + } + + public 
void setVpcId(Long vpcId) { + this.vpcId = vpcId; + } + + @Override + public Long getNetworkId() { + return networkId; + } + + public void setNetworkId(Long networkId) { + this.networkId = networkId; + } + + @Override + public State getState() { + return state; + } + + public void setState(Ipv4GuestSubnetNetworkMap.State state) { + this.state = state; + } + + public void setAllocated(Date allocated) { + this.allocated = allocated; + } + + @Override + public Date getAllocated() { + return allocated; + } + + @Override + public Date getCreated() { + return created; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDao.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDao.java new file mode 100644 index 00000000000..8ca4c2d86da --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDao.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.network.dao; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.BgpPeerVO; + +import java.util.List; +import java.util.Map; + +public interface BgpPeerDao extends GenericDao { + List listNonRevokeByNetworkId(long networkId); + + List listNonRevokeByVpcId(long vpcId); + + BgpPeerVO findByZoneAndAsNumberAndAddress(long zoneId, Long asNumber, String ip4Address, String ip6Address); + + BgpPeerVO persist(BgpPeerVO bgpPeerVO, Map details); + + List listAvailableBgpPeerIdsForAccount(long zoneId, long domainId, long accountId, boolean useSystemBgpPeers); + + int removeByAccountId(long accountId); + int removeByDomainId(long domainId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDaoImpl.java new file mode 100644 index 00000000000..0f95f7c3cd5 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDaoImpl.java @@ -0,0 +1,193 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.network.dao; + +import com.cloud.utils.db.DB; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.BgpPeerDetailsVO; +import org.apache.cloudstack.network.BgpPeerNetworkMapVO; +import org.apache.cloudstack.network.BgpPeerVO; +import org.apache.commons.collections.CollectionUtils; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; +import javax.inject.Inject; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Component +@DB +public class BgpPeerDaoImpl extends GenericDaoBase implements BgpPeerDao { + protected SearchBuilder NetworkIdSearch; + protected SearchBuilder VpcIdSearch; + protected SearchBuilder AllFieldsSearch; + + private static final String LIST_ALL_BGP_PEERS_IDS_FOR_ACCOUNT = "SELECT id FROM `cloud`.`bgp_peers` WHERE removed IS NULL AND data_center_id = ? " + + "AND ((domain_id IS NULL AND account_id IS NULL) " + + "OR (domain_id = ? AND account_id IS NULL) " + + "OR (domain_id = ? AND account_id = ?))"; + + private static final String LIST_DEDICATED_BGP_PEERS_IDS_FOR_ACCOUNT = "SELECT id FROM `cloud`.`bgp_peers` WHERE removed IS NULL AND data_center_id = ? " + + "AND ((domain_id = ? AND account_id IS NULL) " + + "OR (domain_id = ? 
AND account_id = ?))"; + + @Inject + BgpPeerNetworkMapDao bgpPeerNetworkMapDao; + @Inject + BgpPeerDetailsDao bgpPeerDetailsDao; + + @PostConstruct + public void init() { + final SearchBuilder networkSearchBuilder = bgpPeerNetworkMapDao.createSearchBuilder(); + networkSearchBuilder.and("networkId", networkSearchBuilder.entity().getNetworkId(), SearchCriteria.Op.EQ); + networkSearchBuilder.and("state", networkSearchBuilder.entity().getState(), SearchCriteria.Op.IN); + networkSearchBuilder.and("removed", networkSearchBuilder.entity().getRemoved(), SearchCriteria.Op.NULL); + NetworkIdSearch = createSearchBuilder(); + NetworkIdSearch.join("network", networkSearchBuilder, networkSearchBuilder.entity().getBgpPeerId(), + NetworkIdSearch.entity().getId(), JoinBuilder.JoinType.INNER); + NetworkIdSearch.done(); + + final SearchBuilder vpcSearchBuilder = bgpPeerNetworkMapDao.createSearchBuilder(); + vpcSearchBuilder.and("vpcId", vpcSearchBuilder.entity().getVpcId(), SearchCriteria.Op.EQ); + vpcSearchBuilder.and("state", vpcSearchBuilder.entity().getState(), SearchCriteria.Op.IN); + vpcSearchBuilder.and("removed", vpcSearchBuilder.entity().getRemoved(), SearchCriteria.Op.NULL); + VpcIdSearch = createSearchBuilder(); + VpcIdSearch.join("vpc", vpcSearchBuilder, vpcSearchBuilder.entity().getBgpPeerId(), + VpcIdSearch.entity().getId(), JoinBuilder.JoinType.INNER); + VpcIdSearch.done(); + + AllFieldsSearch = createSearchBuilder(); + AllFieldsSearch.and("zoneId", AllFieldsSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("domainId", AllFieldsSearch.entity().getDomainId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("accountId", AllFieldsSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("asNumber", AllFieldsSearch.entity().getAsNumber(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("ip4Address", AllFieldsSearch.entity().getIp4Address(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("ip6Address", 
AllFieldsSearch.entity().getIp6Address(), SearchCriteria.Op.EQ); + AllFieldsSearch.done(); + } + + @Override + public List listNonRevokeByNetworkId(long networkId) { + SearchCriteria sc = NetworkIdSearch.create(); + sc.setJoinParameters("network", "networkId", networkId); + sc.setJoinParameters("network", "state", BgpPeer.State.Active, BgpPeer.State.Add); + return listBy(sc); + } + + @Override + public List listNonRevokeByVpcId(long vpcId) { + SearchCriteria sc = VpcIdSearch.create(); + sc.setJoinParameters("vpc", "vpcId", vpcId); + sc.setJoinParameters("vpc", "state", BgpPeer.State.Active, BgpPeer.State.Add); + return listBy(sc); + } + + @Override + public BgpPeerVO findByZoneAndAsNumberAndAddress(long zoneId, Long asNumber, String ip4Address, String ip6Address) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters( "zoneId", zoneId); + sc.setParameters( "asNumber", asNumber); + if (ip4Address != null) { + sc.setParameters( "ip4Address", ip4Address); + } + if (ip6Address != null) { + sc.setParameters( "ip6Address", ip6Address); + } + return findOneBy(sc); + } + + @Override + public BgpPeerVO persist(BgpPeerVO bgpPeerVO, Map details) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + txn.start(); + BgpPeerVO vo = super.persist(bgpPeerVO); + + // persist the details + if (details != null && !details.isEmpty()) { + for (BgpPeer.Detail detail : details.keySet()) { + bgpPeerDetailsDao.persist(new BgpPeerDetailsVO(bgpPeerVO.getId(), detail, details.get(detail), true)); + } + } + + txn.commit(); + return vo; + } + + @Override + public List listAvailableBgpPeerIdsForAccount(long zoneId, long domainId, long accountId, boolean useSystemBgpPeers) { + if (useSystemBgpPeers) { + return listBgpPeerIdsForAccount(zoneId, domainId, accountId, false); + } else { + List dedicatedBgpPeerIds = listBgpPeerIdsForAccount(zoneId, domainId, accountId, true); + if (CollectionUtils.isNotEmpty(dedicatedBgpPeerIds)) { + return dedicatedBgpPeerIds; + } + return 
listBgpPeerIdsForAccount(zoneId, domainId, accountId, false); + } + } + + private List listBgpPeerIdsForAccount(long zoneId, long domainId, long accountId, boolean isDedicated) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + + StringBuilder sql = isDedicated ? new StringBuilder(LIST_DEDICATED_BGP_PEERS_IDS_FOR_ACCOUNT): new StringBuilder(LIST_ALL_BGP_PEERS_IDS_FOR_ACCOUNT); + + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1, zoneId); + pstmt.setLong(2, domainId); + pstmt.setLong(3, domainId); + pstmt.setLong(4, accountId); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add(rs.getLong(1)); + } + return result; + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + } + + @Override + public int removeByAccountId(long accountId) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId); + return remove(sc); + } + + @Override + public int removeByDomainId(long domainId) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); + return remove(sc); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDetailsDao.java new file mode 100644 index 00000000000..377bc45ebfe --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDetailsDao.java @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.network.dao; + +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.BgpPeerDetailsVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; + +import java.util.List; +import java.util.Map; + +public interface BgpPeerDetailsDao extends ResourceDetailsDao { + Map getBgpPeerDetails(long bgpPeerId); + String getDetail(long offeringId, BgpPeer.Detail detailName); + List findDomainIds(final long resourceId); + List findZoneIds(final long resourceId); + + int removeByBgpPeerId(long bgpPeerId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDetailsDaoImpl.java new file mode 100644 index 00000000000..a974cf5e276 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerDetailsDaoImpl.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.network.dao; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.api.ApiConstants; + +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Func; +import com.cloud.utils.db.SearchCriteria.Op; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.BgpPeerDetailsVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import org.apache.commons.lang3.EnumUtils; + +public class BgpPeerDetailsDaoImpl extends ResourceDetailsDaoBase implements BgpPeerDetailsDao { + protected final SearchBuilder DetailSearch; + private final GenericSearchBuilder ValueSearch; + + public BgpPeerDetailsDaoImpl() { + + DetailSearch = createSearchBuilder(); + DetailSearch.and("resourceId", DetailSearch.entity().getResourceId(), SearchCriteria.Op.EQ); + DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); + DetailSearch.and("value", DetailSearch.entity().getValue(), SearchCriteria.Op.EQ); + DetailSearch.and("display", DetailSearch.entity().isDisplay(), SearchCriteria.Op.EQ); + DetailSearch.done(); + + ValueSearch = createSearchBuilder(String.class); + ValueSearch.select(null, Func.DISTINCT, ValueSearch.entity().getValue()); + ValueSearch.and("resourceId", ValueSearch.entity().getResourceId(), SearchCriteria.Op.EQ); + ValueSearch.and("name", ValueSearch.entity().getName(), 
Op.EQ); + ValueSearch.and("display", ValueSearch.entity().isDisplay(), SearchCriteria.Op.EQ); + ValueSearch.done(); + } + + @Override + public Map getBgpPeerDetails(long bgpPeerId) { + SearchCriteria sc = DetailSearch.create(); + sc.setParameters("resourceId", bgpPeerId); + sc.setParameters("display", true); + + List results = search(sc, null); + if (results.size() == 0) { + return null; + } + Map details = new HashMap<>(results.size()); + for (BgpPeerDetailsVO result : results) { + details.put(result.getDetailName(), result.getValue()); + } + + return details; + } + + @Override + public String getDetail(long bgpPeerId, BgpPeer.Detail detailName) { + SearchCriteria sc = ValueSearch.create(); + sc.setParameters("name", detailName); + sc.setParameters("resourceId", bgpPeerId); + List results = customSearch(sc, null); + if (results.isEmpty()) { + return null; + } else { + return results.get(0); + } + } + + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + persist(new BgpPeerDetailsVO(resourceId, EnumUtils.getEnumIgnoreCase(BgpPeer.Detail.class, key), value, display)); + } + + @Override + public List findDomainIds(long resourceId) { + final List domainIds = new ArrayList<>(); + for (final BgpPeerDetailsVO detail: findDetails(resourceId, ApiConstants.DOMAIN_ID)) { + final Long domainId = Long.valueOf(detail.getValue()); + if (domainId > 0) { + domainIds.add(domainId); + } + } + return domainIds; + } + + @Override + public List findZoneIds(long resourceId) { + final List zoneIds = new ArrayList<>(); + for (final BgpPeerDetailsVO detail: findDetails(resourceId, ApiConstants.ZONE_ID)) { + final Long zoneId = Long.valueOf(detail.getValue()); + if (zoneId > 0) { + zoneIds.add(zoneId); + } + } + return zoneIds; + } + + @Override + public int removeByBgpPeerId(long bgpPeerId) { + SearchCriteria sc = DetailSearch.create(); + sc.setParameters("resourceId", bgpPeerId); + return remove(sc); + } +} diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerNetworkMapDao.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerNetworkMapDao.java new file mode 100644 index 00000000000..8d8ec8c998a --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerNetworkMapDao.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.network.dao; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.network.BgpPeerNetworkMapVO; + +import java.util.List; + + +public interface BgpPeerNetworkMapDao extends GenericDao { + + void persistForNetwork(long networkId, List bgpPeerIds); + + List listByBgpPeerId(long bgpPeerId); + + List listByNetworkId(long networkId); + + List listUsedNetworksByOtherDomains(long bgpPeerId, Long domainId); + + List listUsedNetworksByOtherAccounts(long bgpPeerId, Long accountId); + + int removeByNetworkId(long networkId); + + void persistForVpc(long vpcId, List bgpPeerIds); + + List listByVpcId(long vpcId); + + List listUsedVpcsByOtherDomains(long bgpPeerId, Long domainId); + + List listUsedVpcsByOtherAccounts(long bgpPeerId, Long accountId); + + int removeByVpcId(long vpcId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerNetworkMapDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerNetworkMapDaoImpl.java new file mode 100644 index 00000000000..a5e5f47684a --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/BgpPeerNetworkMapDaoImpl.java @@ -0,0 +1,185 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.network.dao; + +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.inject.Inject; + +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.network.vpc.VpcVO; +import com.cloud.network.vpc.dao.VpcDao; +import com.cloud.utils.db.JoinBuilder; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.BgpPeerNetworkMapVO; +import org.springframework.stereotype.Component; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionLegacy; + +@Component +public class BgpPeerNetworkMapDaoImpl extends GenericDaoBase implements BgpPeerNetworkMapDao { + + protected SearchBuilder BgpPeerNetworkVpcSearch; + protected SearchBuilder NetworkDomainAccountNeqSearch; + protected SearchBuilder VpcDomainAccountNeqSearch; + + @Inject + NetworkDao networkDao; + @Inject + VpcDao vpcDao; + + public BgpPeerNetworkMapDaoImpl() { + } + + @PostConstruct + public void init() { + BgpPeerNetworkVpcSearch = createSearchBuilder(); + BgpPeerNetworkVpcSearch.and("bgpPeerId", BgpPeerNetworkVpcSearch.entity().getBgpPeerId(), SearchCriteria.Op.EQ); + BgpPeerNetworkVpcSearch.and("networkId", BgpPeerNetworkVpcSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); + BgpPeerNetworkVpcSearch.and("vpcId", BgpPeerNetworkVpcSearch.entity().getVpcId(), SearchCriteria.Op.EQ); + BgpPeerNetworkVpcSearch.done(); + + final SearchBuilder networkSearchBuilder = networkDao.createSearchBuilder(); + networkSearchBuilder.and("domainId", networkSearchBuilder.entity().getDomainId(), SearchCriteria.Op.NEQ); + networkSearchBuilder.and("accountId", networkSearchBuilder.entity().getAccountId(), SearchCriteria.Op.NEQ); + NetworkDomainAccountNeqSearch = createSearchBuilder(); + 
NetworkDomainAccountNeqSearch.and("bgpPeerId", NetworkDomainAccountNeqSearch.entity().getBgpPeerId(), SearchCriteria.Op.EQ); + NetworkDomainAccountNeqSearch.join("network", networkSearchBuilder, networkSearchBuilder.entity().getId(), + NetworkDomainAccountNeqSearch.entity().getNetworkId(), JoinBuilder.JoinType.INNER); + NetworkDomainAccountNeqSearch.done(); + + final SearchBuilder vpcSearchBuilder = vpcDao.createSearchBuilder(); + vpcSearchBuilder.and("domainId", vpcSearchBuilder.entity().getDomainId(), SearchCriteria.Op.NEQ); + vpcSearchBuilder.and("accountId", vpcSearchBuilder.entity().getAccountId(), SearchCriteria.Op.NEQ); + VpcDomainAccountNeqSearch = createSearchBuilder(); + VpcDomainAccountNeqSearch.and("bgpPeerId", VpcDomainAccountNeqSearch.entity().getBgpPeerId(), SearchCriteria.Op.EQ); + VpcDomainAccountNeqSearch.join("vpc", vpcSearchBuilder, vpcSearchBuilder.entity().getId(), + VpcDomainAccountNeqSearch.entity().getVpcId(), JoinBuilder.JoinType.INNER); + VpcDomainAccountNeqSearch.done(); + } + + @Override + public void persistForNetwork(long networkId, List bgpPeerIds) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + + txn.start(); + SearchCriteria sc = BgpPeerNetworkVpcSearch.create(); + sc.setParameters("networkId", networkId); + expunge(sc); + + for (Long bgpPeerId : bgpPeerIds) { + BgpPeerNetworkMapVO vo = new BgpPeerNetworkMapVO(bgpPeerId, networkId, null, BgpPeer.State.Active); + persist(vo); + } + + txn.commit(); + } + + @Override + public List listByBgpPeerId(long bgpPeerId) { + SearchCriteria sc = BgpPeerNetworkVpcSearch.create(); + sc.setParameters("bgpPeerId", bgpPeerId); + + return search(sc, null); + } + + @Override + public List listByNetworkId(long networkId) { + SearchCriteria sc = BgpPeerNetworkVpcSearch.create(); + sc.setParameters("networkId", networkId); + + return search(sc, null); + } + + @Override + public List listUsedNetworksByOtherDomains(long bgpPeerId, Long domainId) { + SearchCriteria sc = 
NetworkDomainAccountNeqSearch.create(); + sc.setParameters("bgpPeerId", bgpPeerId); + sc.setJoinParameters("network", "domainId", domainId); + return listBy(sc); + } + + @Override + public List listUsedNetworksByOtherAccounts(long bgpPeerId, Long accountId) { + SearchCriteria sc = NetworkDomainAccountNeqSearch.create(); + sc.setParameters("bgpPeerId", bgpPeerId); + sc.setJoinParameters("network", "accountId", accountId); + return listBy(sc); + } + + @Override + public int removeByNetworkId(long networkId) { + SearchCriteria sc = BgpPeerNetworkVpcSearch.create(); + sc.setParameters("networkId", networkId); + + return remove(sc); + } + + @Override + public void persistForVpc(long vpcId, List bgpPeerIds) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + + txn.start(); + SearchCriteria sc = BgpPeerNetworkVpcSearch.create(); + sc.setParameters("vpcId", vpcId); + expunge(sc); + + for (Long bgpPeerId : bgpPeerIds) { + BgpPeerNetworkMapVO vo = new BgpPeerNetworkMapVO(bgpPeerId, null, vpcId, BgpPeer.State.Active); + persist(vo); + } + + txn.commit(); + } + + @Override + public List listByVpcId(long vpcId) { + SearchCriteria sc = BgpPeerNetworkVpcSearch.create(); + sc.setParameters("vpcId", vpcId); + + return search(sc, null); + } + + @Override + public List listUsedVpcsByOtherDomains(long bgpPeerId, Long domainId) { + SearchCriteria sc = VpcDomainAccountNeqSearch.create(); + sc.setParameters("bgpPeerId", bgpPeerId); + sc.setJoinParameters("vpc", "domainId", domainId); + return listBy(sc); + } + + @Override + public List listUsedVpcsByOtherAccounts(long bgpPeerId, Long accountId) { + SearchCriteria sc = VpcDomainAccountNeqSearch.create(); + sc.setParameters("bgpPeerId", bgpPeerId); + sc.setJoinParameters("vpc", "accountId", accountId); + return listBy(sc); + } + + @Override + public int removeByVpcId(long vpcId) { + SearchCriteria sc = BgpPeerNetworkVpcSearch.create(); + sc.setParameters("vpcId", vpcId); + + return remove(sc); + } +} diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/Ipv4GuestSubnetNetworkMapDao.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/Ipv4GuestSubnetNetworkMapDao.java new file mode 100644 index 00000000000..c3f860009db --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/Ipv4GuestSubnetNetworkMapDao.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.network.dao; + +import java.util.List; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMapVO; + +public interface Ipv4GuestSubnetNetworkMapDao extends GenericDao { + List listByParent(long parentId); + List listUsedByParent(long parentId); + List listUsedByOtherDomains(long parentId, Long domainId); + List listUsedByOtherAccounts(long parentId, Long accountId); + Ipv4GuestSubnetNetworkMapVO findFirstAvailable(long parentId, long cidrSize); + Ipv4GuestSubnetNetworkMapVO findByNetworkId(long networkId); + Ipv4GuestSubnetNetworkMapVO findByVpcId(long vpcId); + Ipv4GuestSubnetNetworkMapVO findBySubnet(String subnet); + List findSubnetsInStates(Ipv4GuestSubnetNetworkMap.State... states); + void deleteByParentId(long parentId); + List listAllNoParent(); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/Ipv4GuestSubnetNetworkMapDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/Ipv4GuestSubnetNetworkMapDaoImpl.java new file mode 100644 index 00000000000..95e53448907 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/Ipv4GuestSubnetNetworkMapDaoImpl.java @@ -0,0 +1,170 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.network.dao; + +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.inject.Inject; + +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMapVO; +import org.apache.commons.collections.CollectionUtils; +import org.springframework.stereotype.Component; + +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@Component +@DB +public class Ipv4GuestSubnetNetworkMapDaoImpl extends GenericDaoBase implements Ipv4GuestSubnetNetworkMapDao { + + protected SearchBuilder ParentStateSearch; + protected SearchBuilder ParentIdSearch; + protected SearchBuilder NoParentSearch; + protected SearchBuilder NetworkIdSearch; + protected SearchBuilder SubnetSearch; + protected SearchBuilder StatesSearch; + protected SearchBuilder DomainAccountNeqSearch; + + @Inject + NetworkDao networkDao; + + @PostConstruct + public void init() { + ParentStateSearch = createSearchBuilder(); + ParentStateSearch.and("parentId", ParentStateSearch.entity().getParentId(), SearchCriteria.Op.EQ); + ParentStateSearch.and("state", ParentStateSearch.entity().getState(), SearchCriteria.Op.IN); + ParentStateSearch.and("subnet", ParentStateSearch.entity().getSubnet(), SearchCriteria.Op.LIKE); + 
ParentStateSearch.done(); + ParentIdSearch = createSearchBuilder(); + ParentIdSearch.and("parentId", ParentIdSearch.entity().getParentId(), SearchCriteria.Op.EQ); + ParentIdSearch.done(); + NoParentSearch = createSearchBuilder(); + NoParentSearch.and("parentId", NoParentSearch.entity().getParentId(), SearchCriteria.Op.NULL); + NoParentSearch.done(); + NetworkIdSearch = createSearchBuilder(); + NetworkIdSearch.and("networkId", NetworkIdSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); + NetworkIdSearch.and("vpcId", NetworkIdSearch.entity().getVpcId(), SearchCriteria.Op.EQ); + NetworkIdSearch.done(); + SubnetSearch = createSearchBuilder(); + SubnetSearch.and("subnet", SubnetSearch.entity().getSubnet(), SearchCriteria.Op.EQ); + SubnetSearch.done(); + StatesSearch = createSearchBuilder(); + StatesSearch.and("state", StatesSearch.entity().getState(), SearchCriteria.Op.IN); + StatesSearch.done(); + + final SearchBuilder networkSearchBuilder = networkDao.createSearchBuilder(); + networkSearchBuilder.and("domainId", networkSearchBuilder.entity().getDomainId(), SearchCriteria.Op.NEQ); + networkSearchBuilder.and("accountId", networkSearchBuilder.entity().getAccountId(), SearchCriteria.Op.NEQ); + DomainAccountNeqSearch = createSearchBuilder(); + DomainAccountNeqSearch.and("parentId", DomainAccountNeqSearch.entity().getParentId(), SearchCriteria.Op.EQ); + DomainAccountNeqSearch.join("network", networkSearchBuilder, networkSearchBuilder.entity().getId(), + DomainAccountNeqSearch.entity().getNetworkId(), JoinBuilder.JoinType.INNER); + DomainAccountNeqSearch.done(); + } + + @Override + public List listByParent(long parentId) { + SearchCriteria sc = ParentIdSearch.create(); + sc.setParameters("parentId", parentId); + return listBy(sc, null); + } + + @Override + public List listUsedByParent(long parentId) { + SearchCriteria sc = ParentStateSearch.create(); + sc.setParameters("parentId", parentId); + sc.setParameters("state", (Object[]) new 
Ipv4GuestSubnetNetworkMap.State[]{Ipv4GuestSubnetNetworkMap.State.Allocated, Ipv4GuestSubnetNetworkMap.State.Allocating}); + return listBy(sc, null); + } + + @Override + public List listUsedByOtherDomains(long parentId, Long domainId) { + SearchCriteria sc = DomainAccountNeqSearch.create(); + sc.setParameters("parentId", parentId); + sc.setJoinParameters("network", "domainId", domainId); + return listBy(sc); + } + + @Override + public List listUsedByOtherAccounts(long parentId, Long accountId) { + SearchCriteria sc = DomainAccountNeqSearch.create(); + sc.setParameters("parentId", parentId); + sc.setJoinParameters("network", "accountId", accountId); + return listBy(sc); + } + + @Override + public Ipv4GuestSubnetNetworkMapVO findFirstAvailable(long parentId, long cidrSize) { + SearchCriteria sc = ParentStateSearch.create(); + sc.setParameters("parentId", parentId); + sc.setParameters("subnet", "%/" + cidrSize); + sc.setParameters("state", (Object[]) new Ipv4GuestSubnetNetworkMap.State[]{Ipv4GuestSubnetNetworkMap.State.Free}); + Filter searchFilter = new Filter(Ipv4GuestSubnetNetworkMapVO.class, "id", true, null, 1L); + List list = listBy(sc, searchFilter); + return CollectionUtils.isNotEmpty(list) ? list.get(0) : null; + } + + @Override + public Ipv4GuestSubnetNetworkMapVO findByNetworkId(long networkId) { + SearchCriteria sc = NetworkIdSearch.create(); + sc.setParameters("networkId", networkId); + return findOneBy(sc); + } + + @Override + public Ipv4GuestSubnetNetworkMapVO findByVpcId(long vpcId) { + SearchCriteria sc = NetworkIdSearch.create(); + sc.setParameters("vpcId", vpcId); + return findOneBy(sc); + } + + @Override + public Ipv4GuestSubnetNetworkMapVO findBySubnet(String subnet) { + SearchCriteria sc = SubnetSearch.create(); + sc.setParameters("subnet", subnet); + return findOneBy(sc); + } + + @Override + public List findSubnetsInStates(Ipv4GuestSubnetNetworkMap.State... 
states) { + SearchCriteria sc = StatesSearch.create(); + sc.setParameters("state", (Object[])states); + return listBy(sc); + } + + @Override + public void deleteByParentId(long parentId) { + SearchCriteria sc = ParentIdSearch.create(); + sc.setParameters("parentId", parentId); + remove(sc); + } + + @Override + public List listAllNoParent() { + SearchCriteria sc = NoParentSearch.create(); + return listBy(sc, null); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/reservation/ReservationVO.java b/engine/schema/src/main/java/org/apache/cloudstack/reservation/ReservationVO.java index df888312a92..df0ede6821a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/reservation/ReservationVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/reservation/ReservationVO.java @@ -25,10 +25,14 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import com.cloud.utils.db.GenericDao; import org.apache.cloudstack.user.ResourceReservation; import com.cloud.configuration.Resource; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.utils.identity.ManagementServerNode; + +import java.util.Date; @Entity @Table(name = "resource_reservation") @@ -57,6 +61,12 @@ public class ReservationVO implements ResourceReservation { @Column(name = "amount") long amount; + @Column(name = "mgmt_server_id") + Long managementServerId; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + protected ReservationVO() { } @@ -69,6 +79,7 @@ public class ReservationVO implements ResourceReservation { this.resourceType = resourceType; this.tag = tag; this.amount = delta; + this.managementServerId = ManagementServerNode.getManagementServerId(); } public ReservationVO(Long accountId, Long domainId, Resource.ResourceType resourceType, Long delta) { @@ -114,4 +125,16 @@ public class ReservationVO implements ResourceReservation { this.resourceId = resourceId; } + @Override 
+ public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } + + public Long getManagementServerId() { + return managementServerId; + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDao.java b/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDao.java index 0433dc8c57d..d6d494f61f9 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDao.java @@ -23,13 +23,17 @@ import org.apache.cloudstack.reservation.ReservationVO; import com.cloud.configuration.Resource; import com.cloud.utils.db.GenericDao; +import java.util.Date; import java.util.List; public interface ReservationDao extends GenericDao { long getAccountReservation(Long account, Resource.ResourceType resourceType, String tag); long getDomainReservation(Long domain, Resource.ResourceType resourceType, String tag); void setResourceId(Resource.ResourceType type, Long resourceId); - List getResourceIds(long accountId, Resource.ResourceType type); List getReservationsForAccount(long accountId, Resource.ResourceType type, String tag); void removeByIds(List reservationIds); + + int removeByMsId(long managementServerId); + + int removeStaleReservations(Long accountId, Resource.ResourceType resourceType, String tag, Date createdBefore); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDaoImpl.java index 8d6e0b6eee0..3b17f4e4294 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDaoImpl.java @@ -18,8 +18,8 @@ // package org.apache.cloudstack.reservation.dao; +import java.util.Date; import 
java.util.List; -import java.util.stream.Collectors; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.reservation.ReservationVO; @@ -42,6 +42,8 @@ public class ReservationDaoImpl extends GenericDaoBase impl private static final String ACCOUNT_ID = "accountId"; private static final String DOMAIN_ID = "domainId"; private static final String IDS = "ids"; + private static final String MS_ID = "managementServerId"; + private static final String CREATED = "created"; private final SearchBuilder listResourceByAccountAndTypeSearch; private final SearchBuilder listAccountAndTypeSearch; private final SearchBuilder listAccountAndTypeAndNoTagSearch; @@ -50,6 +52,7 @@ public class ReservationDaoImpl extends GenericDaoBase impl private final SearchBuilder listDomainAndTypeAndNoTagSearch; private final SearchBuilder listResourceByAccountAndTypeAndNoTagSearch; private final SearchBuilder listIdsSearch; + private final SearchBuilder listMsIdSearch; public ReservationDaoImpl() { @@ -71,12 +74,14 @@ public class ReservationDaoImpl extends GenericDaoBase impl listAccountAndTypeSearch.and(ACCOUNT_ID, listAccountAndTypeSearch.entity().getAccountId(), SearchCriteria.Op.EQ); listAccountAndTypeSearch.and(RESOURCE_TYPE, listAccountAndTypeSearch.entity().getResourceType(), SearchCriteria.Op.EQ); listAccountAndTypeSearch.and(RESOURCE_TAG, listAccountAndTypeSearch.entity().getTag(), SearchCriteria.Op.EQ); + listAccountAndTypeSearch.and(CREATED, listAccountAndTypeSearch.entity().getCreated(), SearchCriteria.Op.LT); listAccountAndTypeSearch.done(); listAccountAndTypeAndNoTagSearch = createSearchBuilder(); listAccountAndTypeAndNoTagSearch.and(ACCOUNT_ID, listAccountAndTypeAndNoTagSearch.entity().getAccountId(), SearchCriteria.Op.EQ); listAccountAndTypeAndNoTagSearch.and(RESOURCE_TYPE, listAccountAndTypeAndNoTagSearch.entity().getResourceType(), SearchCriteria.Op.EQ); listAccountAndTypeAndNoTagSearch.and(RESOURCE_TAG, listAccountAndTypeAndNoTagSearch.entity().getTag(), 
SearchCriteria.Op.NULL); + listAccountAndTypeAndNoTagSearch.and(CREATED, listAccountAndTypeAndNoTagSearch.entity().getCreated(), SearchCriteria.Op.LT); listAccountAndTypeAndNoTagSearch.done(); listDomainAndTypeSearch = createSearchBuilder(); @@ -94,18 +99,24 @@ public class ReservationDaoImpl extends GenericDaoBase impl listIdsSearch = createSearchBuilder(); listIdsSearch.and(IDS, listIdsSearch.entity().getId(), SearchCriteria.Op.IN); listIdsSearch.done(); + + listMsIdSearch = createSearchBuilder(); + listMsIdSearch.and(MS_ID, listMsIdSearch.entity().getManagementServerId(), SearchCriteria.Op.EQ); + listMsIdSearch.done(); } @Override public long getAccountReservation(Long accountId, Resource.ResourceType resourceType, String tag) { long total = 0; - SearchCriteria sc = tag == null ? - listAccountAndTypeAndNoTagSearch.create() : listAccountAndTypeSearch.create(); - sc.setParameters(ACCOUNT_ID, accountId); - sc.setParameters(RESOURCE_TYPE, resourceType); - if (tag != null) { + SearchCriteria sc; + if (tag == null) { + sc = listAccountAndTypeAndNoTagSearch.create(); + } else { + sc = listAccountAndTypeSearch.create(); sc.setParameters(RESOURCE_TAG, tag); } + sc.setParameters(ACCOUNT_ID, accountId); + sc.setParameters(RESOURCE_TYPE, resourceType); List reservations = listBy(sc); for (ReservationVO reservation : reservations) { total += reservation.getReservedAmount(); @@ -116,13 +127,15 @@ public class ReservationDaoImpl extends GenericDaoBase impl @Override public long getDomainReservation(Long domainId, Resource.ResourceType resourceType, String tag) { long total = 0; - SearchCriteria sc = tag == null ? 
- listDomainAndTypeAndNoTagSearch.create() : listDomainAndTypeSearch.create(); - sc.setParameters(DOMAIN_ID, domainId); - sc.setParameters(RESOURCE_TYPE, resourceType); - if (tag != null) { + SearchCriteria sc; + if (tag == null) { + sc = listDomainAndTypeAndNoTagSearch.create(); + } else { + sc = listDomainAndTypeSearch.create(); sc.setParameters(RESOURCE_TAG, tag); } + sc.setParameters(DOMAIN_ID, domainId); + sc.setParameters(RESOURCE_TYPE, resourceType); List reservations = listBy(sc); for (ReservationVO reservation : reservations) { total += reservation.getReservedAmount(); @@ -149,23 +162,17 @@ public class ReservationDaoImpl extends GenericDaoBase impl } } - @Override - public List getResourceIds(long accountId, Resource.ResourceType type) { - SearchCriteria sc = listResourceByAccountAndTypeSearch.create(); - sc.setParameters(ACCOUNT_ID, accountId); - sc.setParameters(RESOURCE_TYPE, type); - return listBy(sc).stream().map(ReservationVO::getResourceId).collect(Collectors.toList()); - } - @Override public List getReservationsForAccount(long accountId, Resource.ResourceType type, String tag) { - SearchCriteria sc = tag == null ? 
- listResourceByAccountAndTypeAndNoTagSearch.create() : listResourceByAccountAndTypeSearch.create(); - sc.setParameters(ACCOUNT_ID, accountId); - sc.setParameters(RESOURCE_TYPE, type); - if (tag != null) { + SearchCriteria sc; + if (tag == null) { + sc = listResourceByAccountAndTypeAndNoTagSearch.create(); + } else { + sc = listResourceByAccountAndTypeSearch.create(); sc.setParameters(RESOURCE_TAG, tag); } + sc.setParameters(ACCOUNT_ID, accountId); + sc.setParameters(RESOURCE_TYPE, type); return listBy(sc); } @@ -177,4 +184,28 @@ public class ReservationDaoImpl extends GenericDaoBase impl remove(sc); } } + + @Override + public int removeByMsId(long managementServerId) { + SearchCriteria sc = listMsIdSearch.create(); + sc.setParameters(MS_ID, managementServerId); + return remove(sc); + } + + @Override + public int removeStaleReservations(Long accountId, Resource.ResourceType resourceType, String tag, + Date createdBefore) { + SearchCriteria sc; + if (tag == null) { + sc = listAccountAndTypeAndNoTagSearch.create(); + } else { + sc = listAccountAndTypeSearch.create(); + sc.setParameters(RESOURCE_TAG, tag); + } + sc.setParameters(ACCOUNT_ID, accountId); + sc.setParameters(RESOURCE_TYPE, resourceType); + sc.setParameters(CREATED, createdBefore); + return remove(sc); + } + } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/FirewallRuleDetailVO.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/FirewallRuleDetailVO.java index 636d889fafe..1149d0b13e7 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/FirewallRuleDetailVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/FirewallRuleDetailVO.java @@ -79,4 +79,8 @@ public class FirewallRuleDetailVO implements ResourceDetail { public boolean isDisplay() { return display; } + + public void setValue(String value) { + this.value = value; + } } diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java index 5a173191be1..8f3d264da98 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java @@ -97,4 +97,6 @@ public interface ResourceDetailsDao extends GenericDao public void addDetail(long resourceId, String key, String value, boolean display); public List findResourceIdsByNameAndValueIn(String name, Object[] values); + + public long batchExpungeForResources(List ids, Long batchSize); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java index 37ebfebf5dd..4205a7823e4 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java @@ -21,13 +21,14 @@ import java.util.List; import java.util.Map; import org.apache.cloudstack.api.ResourceDetail; +import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.TransactionLegacy; public abstract class ResourceDetailsDaoBase extends GenericDaoBase implements ResourceDetailsDao { private SearchBuilder AllFieldsSearch; @@ -201,4 +202,17 @@ public abstract class ResourceDetailsDaoBase extends G return customSearch(sc, null); } + + @Override + public long batchExpungeForResources(final List ids, final Long batchSize) { + if 
(CollectionUtils.isEmpty(ids)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("ids", sb.entity().getResourceId(), Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("ids", ids.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/UserDetailVO.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/UserDetailVO.java index 1b430e806e2..d0cfcc3d439 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/UserDetailVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/UserDetailVO.java @@ -46,6 +46,8 @@ public class UserDetailVO implements ResourceDetail { private boolean display = true; public static final String Setup2FADetail = "2FASetupStatus"; + public static final String PasswordResetToken = "PasswordResetToken"; + public static final String PasswordResetTokenExpiryDate = "PasswordResetTokenExpiryDate"; public UserDetailVO() { } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDetailsDaoImpl.java index 8e5ce770f45..14830490600 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDetailsDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDetailsDaoImpl.java @@ -20,6 +20,11 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.ConfigKey.Scope; +import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.springframework.stereotype.Component; import com.cloud.utils.crypt.DBEncryptionUtil; @@ 
-29,12 +34,6 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.ConfigKey.Scope; -import org.apache.cloudstack.framework.config.ScopedConfigStorage; -import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; - @Component public class ImageStoreDetailsDaoImpl extends ResourceDetailsDaoBase implements ImageStoreDetailsDao, ScopedConfigStorage { @@ -106,8 +105,8 @@ public class ImageStoreDetailsDaoImpl extends ResourceDetailsDaoBase key) { - ImageStoreDetailVO vo = findDetail(id, key.key()); + public String getConfigValue(long id, String key) { + ImageStoreDetailVO vo = findDetail(id, key); return vo == null ? null : vo.getValue(); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java index d42e863cbed..f0c235e842c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java @@ -127,6 +127,10 @@ public interface PrimaryDataStoreDao extends GenericDao { List findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType, String keyword); + List findZoneWideStoragePoolsByHypervisorAndPoolType(long dataCenterId, HypervisorType hypervisorType, Storage.StoragePoolType poolType); + + List findClusterWideStoragePoolsByHypervisorAndPoolType(long clusterId, HypervisorType hypervisorType, Storage.StoragePoolType poolType); + List findLocalStoragePoolsByHostAndTags(long hostId, String[] tags); List listLocalStoragePoolByPath(long datacenterId, String path); @@ -141,6 +145,8 @@ public interface PrimaryDataStoreDao 
extends GenericDao { List findPoolsByStorageType(Storage.StoragePoolType storageType); + StoragePoolVO findPoolByZoneAndPath(long zoneId, String datastorePath); + List listStoragePoolsWithActiveVolumesByOfferingId(long offeringid); Pair, Integer> searchForIdsAndCount(Long storagePoolId, String storagePoolName, Long zoneId, diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index e4dd66a86b8..1658fe0a537 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -28,6 +28,7 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.storage.Storage; import com.cloud.utils.Pair; import com.cloud.utils.db.Filter; import org.apache.commons.collections.CollectionUtils; @@ -35,7 +36,6 @@ import org.apache.commons.collections.CollectionUtils; import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.ScopeType; -import com.cloud.storage.Storage; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StoragePoolTagVO; @@ -622,6 +622,28 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase return sc.list(); } + @Override + public List findZoneWideStoragePoolsByHypervisorAndPoolType(long dataCenterId, HypervisorType hypervisorType, Storage.StoragePoolType poolType) { + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dataCenterId); + sc.and(sc.entity().getStatus(), Op.EQ, StoragePoolStatus.Up); + sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE); + sc.and(sc.entity().getHypervisor(), Op.EQ, hypervisorType); + 
sc.and(sc.entity().getPoolType(), Op.EQ, poolType); + return sc.list(); + } + + @Override + public List findClusterWideStoragePoolsByHypervisorAndPoolType(long clusterId, HypervisorType hypervisorType, Storage.StoragePoolType poolType) { + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getClusterId(), Op.EQ, clusterId); + sc.and(sc.entity().getStatus(), Op.EQ, StoragePoolStatus.Up); + sc.and(sc.entity().getScope(), Op.EQ, ScopeType.CLUSTER); + sc.and(sc.entity().getHypervisor(), Op.EQ, hypervisorType); + sc.and(sc.entity().getPoolType(), Op.EQ, poolType); + return sc.list(); + } + @Override public void deletePoolTags(long poolId) { _tagsDao.deleteTags(poolId); @@ -660,6 +682,16 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase return listBy(sc); } + @Override + public StoragePoolVO findPoolByZoneAndPath(long zoneId, String datastorePath) { + SearchCriteria sc = AllFieldSearch.create(); + sc.setParameters("datacenterId", zoneId); + if (datastorePath != null) { + sc.addAnd("path", Op.LIKE, "%/" + datastorePath); + } + return findOneBy(sc); + } + @Override public List listStoragePoolsWithActiveVolumesByOfferingId(long offeringId) { TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java index 344ff8b2a69..4cd29b465ee 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java @@ -106,4 +106,6 @@ StateDao snapshotIds, Long batchSize); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java index 
c095f4222e7..5bf67eb3881 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java @@ -559,4 +559,16 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase snapshotIds, final Long batchSize) { + if (CollectionUtils.isEmpty(snapshotIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("snapshotIds", sb.entity().getSnapshotId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("snapshotIds", snapshotIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java index a1dc05fce58..7a466c1f505 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java @@ -86,6 +86,13 @@ public class SnapshotDataStoreVO implements StateObject, List listByVolume(long volumeId, long storeId); List listByStoreIdAndInstallPaths(Long storeId, List paths); + + int expungeByVolumeList(List volumeIds, Long batchSize); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java new file mode 100644 index 00000000000..3b869a5429f --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSVO.java @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.sharedfs; + + +import java.util.Date; +import java.util.UUID; + +import com.cloud.utils.db.GenericDao; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +@Entity +@Table(name = "shared_filesystem") +public class SharedFSVO implements SharedFS { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "name") + private String name; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "description") + private String description; + + @Column(name = "domain_id") + private long domainId; + + @Column(name = "account_id") + private long accountId; + + @Column(name = "data_center_id") + private long dataCenterId; + + @Column(name = "state") + @Enumerated(value = EnumType.STRING) + private State state; + + @Column(name = "fs_provider_name") + private String fsProviderName; + + @Column(name = "protocol") + @Enumerated(value = EnumType.STRING) + private Protocol protocol; + + @Column(name = "volume_id") + private Long volumeId; + + @Column(name = "vm_id") + 
private Long vmId; + + @Column(name = "fs_type") + @Enumerated(value = EnumType.STRING) + private FileSystemType fsType; + + @Column(name = "service_offering_id") + private Long serviceOfferingId; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name = "update_count", updatable = true, nullable = false) + protected long updatedCount; // This field should be updated everytime the + // state is updated. There's no set method in + // the vo object because it is done with in the + // dao code. + + @Column(name = GenericDao.CREATED_COLUMN) + protected Date created; + + @Column(name = GenericDao.REMOVED_COLUMN) + protected Date removed; + + public SharedFSVO() { + } + + public SharedFSVO(String name, String description, long domainId, long accountId, long dataCenterId, + String fsProviderName, Protocol protocol, FileSystemType fsType, Long serviceOfferingId) { + this.name = name; + this.description = description; + this.domainId = domainId; + this.accountId = accountId; + this.dataCenterId = dataCenterId; + this.fsProviderName = fsProviderName; + this.protocol = protocol; + this.state = State.Allocated; + this.fsType = fsType; + this.serviceOfferingId = serviceOfferingId; + this.uuid = UUID.randomUUID().toString(); + } + + @Override + public Class getEntityType() { + return SharedFS.class; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public void setName(String name) { + this.name = name; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public String getDescription() { + return description; + } + + @Override + public void setDescription(String description) { + this.description = description; + } + + @Override + public long getDomainId() { + return domainId; + } + + @Override + public long getAccountId() { + return accountId; + } + + @Override + public Long getDataCenterId() { + return dataCenterId; 
+ } + + @Override + public State getState() { + return state; + } + + @Override + public String getFsProviderName() { + return fsProviderName; + } + + @Override + public Protocol getProtocol() { + return protocol; + } + + @Override + public Long getVolumeId() { + return volumeId; + } + + @Override + public void setVolumeId(Long volumeId) { + this.volumeId = volumeId; + } + + @Override + public Long getVmId() { + return vmId; + } + + @Override + public void setVmId(Long vmId) { + this.vmId = vmId; + } + + @Override + public FileSystemType getFsType() { + return fsType; + } + + @Override + public Long getServiceOfferingId() { + return serviceOfferingId; + } + + @Override + public void setServiceOfferingId(Long serviceOfferingId) { + this.serviceOfferingId = serviceOfferingId; + } + + @Override + public Date getUpdated() { + return updated; + } + + @Override + public long getUpdatedCount() { + return updatedCount; + } + + @Override + public void incrUpdatedCount() { + updatedCount++; + } + +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/dao/SharedFSDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/dao/SharedFSDao.java new file mode 100644 index 00000000000..4735202a762 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/dao/SharedFSDao.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.sharedfs.dao; + +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSVO; + +import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; + +import java.util.Date; +import java.util.List; + +public interface SharedFSDao extends GenericDao, StateDao { + List listSharedFSToBeDestroyed(Date date); + + SharedFSVO findSharedFSByNameAccountDomain(String name, Long accountId, Long domainId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/dao/SharedFSDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/dao/SharedFSDaoImpl.java new file mode 100644 index 00000000000..da622071671 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/sharedfs/dao/SharedFSDaoImpl.java @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.sharedfs.dao; + +import com.cloud.network.dao.NetworkDao; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.UpdateBuilder; + +import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMNetworkMapDao; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSVO; + +import javax.inject.Inject; +import java.util.Date; +import java.util.List; + +public class SharedFSDaoImpl extends GenericDaoBase implements SharedFSDao { + + @Inject + VMNetworkMapDao vmNetworkMapDao; + + @Inject + NetworkDao networkDao; + + protected final SearchBuilder StateUpdateCountSearch; + + protected final SearchBuilder DestroyedByTimeSearch; + + protected final SearchBuilder NameAccountDomainSearch; + + public SharedFSDaoImpl() { + StateUpdateCountSearch = createSearchBuilder(); + StateUpdateCountSearch.and("id", StateUpdateCountSearch.entity().getId(), SearchCriteria.Op.EQ); + StateUpdateCountSearch.and("state", StateUpdateCountSearch.entity().getState(), SearchCriteria.Op.EQ); + StateUpdateCountSearch.and("updatedCount", StateUpdateCountSearch.entity().getUpdatedCount(), SearchCriteria.Op.EQ); + StateUpdateCountSearch.done(); + + DestroyedByTimeSearch = createSearchBuilder(); + DestroyedByTimeSearch.and("state", DestroyedByTimeSearch.entity().getState(), SearchCriteria.Op.IN); + DestroyedByTimeSearch.and("accountId", DestroyedByTimeSearch.entity().getAccountId(), 
SearchCriteria.Op.EQ); + DestroyedByTimeSearch.done(); + + NameAccountDomainSearch = createSearchBuilder(); + NameAccountDomainSearch.and("name", NameAccountDomainSearch.entity().getName(), SearchCriteria.Op.EQ); + NameAccountDomainSearch.and("accountId", NameAccountDomainSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + NameAccountDomainSearch.and("domainId", NameAccountDomainSearch.entity().getDomainId(), SearchCriteria.Op.EQ); + NameAccountDomainSearch.done(); + } + + @Override + public boolean updateState(SharedFS.State currentState, SharedFS.Event event, SharedFS.State nextState, SharedFS vo, Object data) { + + Long oldUpdated = vo.getUpdatedCount(); + Date oldUpdatedTime = vo.getUpdated(); + + SearchCriteria sc = StateUpdateCountSearch.create(); + sc.setParameters("id", vo.getId()); + sc.setParameters("state", currentState); + sc.setParameters("updatedCount", vo.getUpdatedCount()); + + vo.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((SharedFSVO) vo, sc); + if (rows == 0 && logger.isDebugEnabled()) { + SharedFSVO dbSharedFS = findByIdIncludingRemoved(vo.getId()); + if (dbSharedFS != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={id=").append(dbSharedFS.getId()).append("; state=").append(dbSharedFS.getState()).append("; updatecount=").append(dbSharedFS.getUpdatedCount()).append(";updatedTime=") + .append(dbSharedFS.getUpdated()); + str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(vo.getUpdatedCount()) + .append("; updatedTime=").append(vo.getUpdated()); + str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; 
updatedTime=").append(oldUpdatedTime); + } else { + logger.debug("Unable to update sharedfs: id=" + vo.getId() + ", as it is not present in the database anymore"); + } + } + return rows > 0; + } + + @Override + public List listSharedFSToBeDestroyed(Date date) { + SearchCriteria sc = DestroyedByTimeSearch.create(); + sc.setParameters("state", SharedFS.State.Destroyed, SharedFS.State.Expunging, SharedFS.State.Error); + sc.setParameters("updateTime", date); + return listBy(sc); + } + + @Override + public SharedFSVO findSharedFSByNameAccountDomain(String name, Long accountId, Long domainId) { + SearchCriteria sc = NameAccountDomainSearch.create(); + sc.setParameters("name", name); + sc.setParameters("accountId", accountId); + sc.setParameters("domainId", domainId); + return findOneBy(sc); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/util/CPUArchConverter.java b/engine/schema/src/main/java/org/apache/cloudstack/util/CPUArchConverter.java new file mode 100644 index 00000000000..e278809fb96 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/util/CPUArchConverter.java @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.util; + +import com.cloud.cpu.CPU; + +import javax.persistence.AttributeConverter; +import javax.persistence.Converter; + +@Converter +public class CPUArchConverter implements AttributeConverter { + + @Override + public String convertToDatabaseColumn(CPU.CPUArch cpuArch) { + return cpuArch == null ? CPU.CPUArch.amd64.getType() : cpuArch.getType(); + } + + @Override + public CPU.CPUArch convertToEntityAttribute(String attribute) { + return CPU.CPUArch.fromType(attribute); + } +} diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml index 8ab60a76624..171685ce413 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml @@ -269,6 +269,7 @@ + @@ -289,4 +290,13 @@ + + + + + + + + + diff --git a/engine/schema/src/main/resources/META-INF/db/data-217to218.sql b/engine/schema/src/main/resources/META-INF/db/data-217to218.sql index 5c1253143f4..1a03e9b7998 100755 --- a/engine/schema/src/main/resources/META-INF/db/data-217to218.sql +++ b/engine/schema/src/main/resources/META-INF/db/data-217to218.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.add_guest_os_and_hypervisor_mapping.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.add_guest_os_and_hypervisor_mapping.sql new file mode 100644 index 00000000000..efe56bccf2d --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.add_guest_os_and_hypervisor_mapping.sql @@ -0,0 +1,49 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- PR#4699 Drop the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` if it already exist. +DROP PROCEDURE IF EXISTS `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING`; + +-- PR#4699 Create the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` to add guest_os and guest_os_hypervisor mapping. 
+CREATE PROCEDURE `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` ( + IN guest_os_category_id bigint(20) unsigned, + IN guest_os_display_name VARCHAR(255), + IN guest_os_hypervisor_hypervisor_type VARCHAR(32), + IN guest_os_hypervisor_hypervisor_version VARCHAR(32), + IN guest_os_hypervisor_guest_os_name VARCHAR(255) + ) +BEGIN +INSERT INTO cloud.guest_os (uuid, category_id, display_name, created) +SELECT UUID(), guest_os_category_id, guest_os_display_name, now() +FROM DUAL +WHERE not exists( SELECT 1 + FROM cloud.guest_os + WHERE cloud.guest_os.category_id = guest_os_category_id + AND cloud.guest_os.display_name = guest_os_display_name) + +; INSERT INTO cloud.guest_os_hypervisor (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created) + SELECT UUID(), guest_os_hypervisor_hypervisor_type, guest_os_hypervisor_hypervisor_version, guest_os_hypervisor_guest_os_name, guest_os.id, now() + FROM cloud.guest_os + WHERE guest_os.category_id = guest_os_category_id + AND guest_os.display_name = guest_os_display_name + AND NOT EXISTS (SELECT 1 + FROM cloud.guest_os_hypervisor as hypervisor + WHERE hypervisor_type = guest_os_hypervisor_hypervisor_type + AND hypervisor_version = guest_os_hypervisor_hypervisor_version + AND hypervisor.guest_os_id = guest_os.id + AND hypervisor.guest_os_name = guest_os_hypervisor_guest_os_name) +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_column.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_column.sql new file mode 100644 index 00000000000..7872f60b2db --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_column.sql @@ -0,0 +1,27 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. 
The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- in cloud +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`; +CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` ( + IN in_table_name VARCHAR(200), + IN in_column_name VARCHAR(200), + IN in_column_definition VARCHAR(1000) +) +BEGIN + + DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_key.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_key.sql new file mode 100644 index 00000000000..8083080088e --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_key.sql @@ -0,0 +1,27 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_KEY`; + +CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_KEY` ( + IN in_index_name VARCHAR(200) + , IN in_table_name VARCHAR(200) + , IN in_key_definition VARCHAR(1000) +) +BEGIN + + DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' ADD KEY ') ; SET @ddl = CONCAT(@ddl, ' ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', in_key_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_unique_index.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_unique_index.sql new file mode 100644 index 00000000000..22f490ad0fa --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_unique_index.sql @@ -0,0 +1,26 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- Idempotent ADD UNIQUE INDEX +DROP PROCEDURE IF EXISTS `cloud_usage`.`IDEMPOTENT_ADD_UNIQUE_INDEX`; +CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_ADD_UNIQUE_INDEX` ( + IN in_table_name VARCHAR(200) +, IN in_index_name VARCHAR(200) +, IN in_index_definition VARCHAR(1000) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD UNIQUE INDEX ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', in_index_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_unique_key.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_unique_key.sql new file mode 100644 index 00000000000..5d4cbf6c770 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_add_unique_key.sql @@ -0,0 +1,26 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- Idempotent ADD UNIQUE KEY +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_UNIQUE_KEY`; +CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_UNIQUE_KEY` ( + IN in_table_name VARCHAR(200) +, IN in_key_name VARCHAR(200) +, IN in_key_definition VARCHAR(1000) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD UNIQUE KEY ', in_key_name); SET @ddl = CONCAT(@ddl, ' ', in_key_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_change_column.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_change_column.sql new file mode 100644 index 00000000000..d63c92e6b81 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_change_column.sql @@ -0,0 +1,27 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- in usage Idempotent CHANGE COLUMN +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_CHANGE_COLUMN`; +CREATE PROCEDURE `cloud`.`IDEMPOTENT_CHANGE_COLUMN` ( + IN in_table_name VARCHAR(200) + , IN in_column_name VARCHAR(200) + , IN in_column_new_name VARCHAR(200) + , IN in_column_new_definition VARCHAR(1000) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1054 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'CHANGE COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_new_name); SET @ddl = CONCAT(@ddl, ' ', in_column_new_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_create_unique_index.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_create_unique_index.sql new file mode 100644 index 00000000000..167b17412f1 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_create_unique_index.sql @@ -0,0 +1,27 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_CREATE_UNIQUE_INDEX`; + +CREATE PROCEDURE `cloud`.`IDEMPOTENT_CREATE_UNIQUE_INDEX` ( + IN in_index_name VARCHAR(200) + , IN in_table_name VARCHAR(200) + , IN in_index_definition VARCHAR(1000) +) +BEGIN + + DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('CREATE UNIQUE INDEX ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', ' ON ') ; SET @ddl = CONCAT(@ddl, ' ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', in_index_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_drop_foreign_key.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_drop_foreign_key.sql new file mode 100644 index 00000000000..0ba0a411ca9 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_drop_foreign_key.sql @@ -0,0 +1,25 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY`; + +CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY` ( + IN in_table_name VARCHAR(200), + IN in_foreign_key_name VARCHAR(200) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1091, 1025 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' DROP FOREIGN KEY '); SET @ddl = CONCAT(@ddl, ' ', in_foreign_key_name); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_drop_index.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_drop_index.sql new file mode 100644 index 00000000000..1e1afd26597 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_drop_index.sql @@ -0,0 +1,27 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. 
See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- in cloud +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_INDEX`; + +CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_INDEX` ( + IN in_index_name VARCHAR(200) + , IN in_table_name VARCHAR(200) +) +BEGIN + + DECLARE CONTINUE HANDLER FOR 1091 BEGIN END; SET @ddl = CONCAT('DROP INDEX ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', ' ON ') ; SET @ddl = CONCAT(@ddl, ' ', in_table_name); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_insert_guestos_hypervisor_mapping.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_insert_guestos_hypervisor_mapping.sql new file mode 100644 index 00000000000..7af96f56145 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.idempotent_insert_guestos_hypervisor_mapping.sql @@ -0,0 +1,48 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`; + +CREATE PROCEDURE `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`( + IN in_hypervisor_type VARCHAR(32), + IN in_hypervisor_version VARCHAR(32), + IN in_guest_os_name VARCHAR(255), + IN in_guest_os_id BIGINT(20) UNSIGNED, + IN is_user_defined int(1) UNSIGNED) +BEGIN + IF NOT EXISTS ((SELECT * FROM `cloud`.`guest_os_hypervisor` WHERE + hypervisor_type=in_hypervisor_type AND + hypervisor_version=in_hypervisor_version AND + guest_os_id = in_guest_os_id)) + THEN + INSERT INTO `cloud`.`guest_os_hypervisor` ( + uuid, + hypervisor_type, + hypervisor_version, + guest_os_name, + guest_os_id, + created, + is_user_defined) + VALUES ( + UUID(), + in_hypervisor_type, + in_hypervisor_version, + in_guest_os_name, + in_guest_os_id, + utc_timestamp(), + is_user_defined + ); END IF; END;; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_add_column.sql b/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_add_column.sql new file mode 100644 index 00000000000..e257b64bd8a --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_add_column.sql @@ -0,0 +1,26 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. 
See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- in usage +DROP PROCEDURE IF EXISTS `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`; +CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_ADD_COLUMN` ( + IN in_table_name VARCHAR(200) +, IN in_column_name VARCHAR(200) +, IN in_column_definition VARCHAR(1000) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_change_column.sql b/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_change_column.sql new file mode 100644 index 00000000000..a47b6a1fbf9 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_change_column.sql @@ -0,0 +1,27 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +-- in usage Idempotent CHANGE COLUMN +DROP PROCEDURE IF EXISTS `cloud_usage`.`IDEMPOTENT_CHANGE_COLUMN`; +CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_CHANGE_COLUMN` ( + IN in_table_name VARCHAR(200) + , IN in_old_column_name VARCHAR(200) + , IN in_new_column_name VARCHAR(200) + , IN in_column_definition VARCHAR(1000) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' CHANGE COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_old_column_name); SET @ddl = CONCAT(@ddl, ' ', in_new_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_drop_index.sql b/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_drop_index.sql new file mode 100644 index 00000000000..f824ebad98e --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/usage.idempotent_drop_index.sql @@ -0,0 +1,25 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +-- in usage +DROP PROCEDURE IF EXISTS `cloud_usage`.`IDEMPOTENT_DROP_INDEX`; +CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_DROP_INDEX` ( + IN in_index_name VARCHAR(200) +, IN in_table_name VARCHAR(200) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1091 BEGIN END; SET @ddl = CONCAT('DROP INDEX ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', ' ON ') ; SET @ddl = CONCAT(@ddl, ' ', in_table_name); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-20to21.sql b/engine/schema/src/main/resources/META-INF/db/schema-20to21.sql index 7013046ca43..000d0f077cc 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-20to21.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-20to21.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -141,14 +141,14 @@ ALTER TABLE `cloud`.`host` ADD COLUMN `cluster_id` bigint unsigned; -- -- enforced in postporcess-20to21.sql -ALTER TABLE `cloud`.`host_pod_ref` ADD COLUMN `gateway` varchar(255); -- need to migrage data with user input +ALTER TABLE `cloud`.`host_pod_ref` ADD COLUMN `gateway` varchar(255); -- need to migrage data with user input -ALTER TABLE `cloud`.`service_offering` ADD COLUMN `recreatable` tinyint(1) unsigned NOT NULL DEFAULT 0; +ALTER TABLE `cloud`.`service_offering` ADD COLUMN `recreatable` tinyint(1) unsigned NOT NULL DEFAULT 0; ALTER TABLE `cloud`.`service_offering` ADD COLUMN `tags` varchar(255); -ALTER TABLE `cloud`.`user_vm` MODIFY COLUMN `domain_router_id` bigint unsigned; -- change from NOT NULL to NULL +ALTER TABLE `cloud`.`user_vm` MODIFY 
COLUMN `domain_router_id` bigint unsigned; -- change from NOT NULL to NULL -ALTER TABLE `cloud`.`event` ADD COLUMN `state` varchar(32) NOT NULL DEFAULT 'Completed'; +ALTER TABLE `cloud`.`event` ADD COLUMN `state` varchar(32) NOT NULL DEFAULT 'Completed'; ALTER TABLE `cloud`.`event` ADD COLUMN `start_id` bigint unsigned NOT NULL DEFAULT 0; ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `tags` varchar(4096); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-217to218.sql b/engine/schema/src/main/resources/META-INF/db/schema-217to218.sql index f2b6b291f78..006a3f1cd7b 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-217to218.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-217to218.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-21to22-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-21to22-cleanup.sql index c8757833fab..8a3ca39d5e7 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-21to22-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-21to22-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-21to22-premium.sql b/engine/schema/src/main/resources/META-INF/db/schema-21to22-premium.sql index 45202840565..a34b65dc8ab 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-21to22-premium.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-21to22-premium.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -50,7 +50,7 @@ CREATE TABLE `cloud_usage`.`usage_event` ( `resource_name` varchar(255), `offering_id` bigint unsigned, `template_id` bigint unsigned, - `size` bigint unsigned, + `size` bigint unsigned, `processed` tinyint NOT NULL default '0', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-21to22.sql b/engine/schema/src/main/resources/META-INF/db/schema-21to22.sql index eb473cfc7f6..8da29caae53 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-21to22.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-21to22.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -22,7 +22,7 @@ ALTER TABLE `cloud`.`cluster` ADD COLUMN `guid` varchar(255) UNIQUE DEFAULT NULL ALTER TABLE `cloud`.`cluster` ADD COLUMN `cluster_type` varchar(64) DEFAULT 'CloudManaged'; ALTER TABLE `cloud`.`vm_template` ADD COLUMN `hypervisor_type` varchar(32) COMMENT 'hypervisor that the template is belonged to'; ALTER TABLE `cloud`.`vm_template` ADD COLUMN `extractable` int(1) unsigned NOT NULL default 0 COMMENT 'Is this template extractable'; -ALTER TABLE `cloud`.`template_spool_ref` ADD CONSTRAINT `fk_template_spool_ref__template_id` FOREIGN KEY (`template_id`) REFERENCES `vm_template`(`id`); +ALTER TABLE `cloud`.`template_spool_ref` ADD CONSTRAINT `fk_template_spool_ref__template_id` FOREIGN KEY (`template_id`) REFERENCES `vm_template`(`id`); ALTER TABLE `cloud`.`guest_os` modify `name` varchar(255) ; @@ -104,7 +104,7 @@ CREATE TABLE `cloud`.`networks` ( `broadcast_domain_type` varchar(32) NOT NULL COMMENT 'type of broadcast domain used', `broadcast_uri` varchar(255) COMMENT 'broadcast domain specifier', `gateway` varchar(15) COMMENT 'gateway for this network configuration', - `cidr` varchar(18) COMMENT 'network cidr', + `cidr` varchar(18) COMMENT 'network cidr', `mode` varchar(32) COMMENT 'How to retrieve ip address in this network', `network_offering_id` bigint unsigned NOT NULL COMMENT 'network offering id that this configuration is created from', `data_center_id` bigint unsigned NOT NULL COMMENT 'data center id that this configuration is used in', @@ -167,7 +167,7 @@ CREATE TABLE `cloud`.`nics` ( `ip_type` varchar(32) COMMENT 'type of ip', `broadcast_uri` varchar(255) COMMENT 'broadcast uri', `network_id` bigint unsigned NOT NULL COMMENT 'network configuration id', - 
`mode` varchar(32) COMMENT 'mode of getting ip address', + `mode` varchar(32) COMMENT 'mode of getting ip address', `state` varchar(32) NOT NULL COMMENT 'state of the creation', `strategy` varchar(32) NOT NULL COMMENT 'reservation strategy', `reserver_name` varchar(255) COMMENT 'Name of the component that reserved the ip address', @@ -176,7 +176,7 @@ CREATE TABLE `cloud`.`nics` ( `update_time` timestamp NOT NULL COMMENT 'time the state was changed', `isolation_uri` varchar(255) COMMENT 'id for isolation', `ip6_address` char(40) COMMENT 'ip6 address', - `default_nic` tinyint NOT NULL COMMENT "None", + `default_nic` tinyint NOT NULL COMMENT "None", `created` datetime NOT NULL COMMENT 'date created', `removed` datetime COMMENT 'date removed if not null', PRIMARY KEY (`id`), @@ -253,7 +253,7 @@ CREATE TABLE `cloud`.`op_host` ( `id` bigint unsigned NOT NULL UNIQUE COMMENT 'host id', `sequence` bigint unsigned DEFAULT 1 NOT NULL COMMENT 'sequence for the host communication', PRIMARY KEY (`id`), - CONSTRAINT `fk_op_host__id` FOREIGN KEY (`id`) REFERENCES `host`(`id`) ON DELETE CASCADE + CONSTRAINT `fk_op_host__id` FOREIGN KEY (`id`) REFERENCES `host`(`id`) ON DELETE CASCADE ) ENGINE = InnoDB DEFAULT CHARSET=utf8; CREATE TABLE `cloud`.`guest_os_hypervisor` ( @@ -261,7 +261,7 @@ CREATE TABLE `cloud`.`guest_os_hypervisor` ( `hypervisor_type` varchar(32) NOT NULL, `guest_os_name` varchar(255) NOT NULL, `guest_os_id` bigint unsigned NOT NULL, - PRIMARY KEY (`id`) + PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; INSERT INTO op_host(id, sequence) select id, sequence from host; @@ -269,7 +269,7 @@ INSERT INTO op_host(id, sequence) select id, sequence from host; -- Alter Tables to add Columns; ALTER TABLE `cloud`.`cluster` ADD COLUMN `hypervisor_type` varchar(32); -UPDATE `cloud`.`cluster` SET hypervisor_type=(SELECT DISTINCT host.hypervisor_type from host where host.cluster_id = cluster.id GROUP BY host.hypervisor_type); +UPDATE `cloud`.`cluster` SET 
hypervisor_type=(SELECT DISTINCT host.hypervisor_type from host where host.cluster_id = cluster.id GROUP BY host.hypervisor_type); ALTER TABLE `cloud`.`volumes` ADD COLUMN `attached` datetime; UPDATE `cloud`.`volumes` SET attached=now() WHERE removed IS NULL AND instance_id IS NOT NULL; @@ -286,7 +286,7 @@ ALTER TABLE `cloud`.`vlan` ADD COLUMN `network_id` bigint unsigned NOT NULL; ALTER TABLE `cloud`.`data_center` ADD COLUMN `domain` varchar(100); ALTER TABLE `cloud`.`data_center` ADD COLUMN `domain_id` bigint unsigned; -ALTER TABLE `cloud`.`data_center` ADD COLUMN `networktype` varchar(255) NOT NULL DEFAULT 'Basic'; +ALTER TABLE `cloud`.`data_center` ADD COLUMN `networktype` varchar(255) NOT NULL DEFAULT 'Basic'; ALTER TABLE `cloud`.`data_center` ADD COLUMN `dns_provider` char(64) DEFAULT 'VirtualRouter'; ALTER TABLE `cloud`.`data_center` ADD COLUMN `gateway_provider` char(64) DEFAULT 'VirtualRouter'; ALTER TABLE `cloud`.`data_center` ADD COLUMN `firewall_provider` char(64) DEFAULT 'VirtualRouter'; @@ -306,7 +306,7 @@ UPDATE `cloud`.`op_dc_link_local_ip_address_alloc` SET reservation_id=concat(cas ALTER TABLE `cloud`.`host_pod_ref` ADD COLUMN `enabled` tinyint NOT NULL DEFAULT 1; ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD COLUMN `reservation_id` char(40) NULL; -UPDATE op_dc_vnet_alloc set reservation_id=concat(cast(data_center_id as CHAR), concat("-", vnet)) WHERE taken is NOT NULL; +UPDATE op_dc_vnet_alloc set reservation_id=concat(cast(data_center_id as CHAR), concat("-", vnet)) WHERE taken is NOT NULL; ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `service_offering_id` bigint unsigned NOT NULL; ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `reservation_id` char(40); @@ -326,7 +326,7 @@ ALTER TABLE `cloud`.`user_vm` ADD COLUMN `display_name` varchar(255); UPDATE user_vm inner join vm_instance on user_vm.id=vm_instance.id set user_vm.iso_id=vm_instance.iso_id, user_vm.display_name=vm_instance.display_name where vm_instance.type='User'; ALTER TABLE 
`cloud`.`template_host_ref` ADD COLUMN `physical_size` bigint unsigned DEFAULT 0; -UPDATE template_host_ref INNER JOIN template_spool_ref ON template_host_ref.template_id=template_spool_ref.template_id SET template_host_ref.physical_size=template_spool_ref.template_size; +UPDATE template_host_ref INNER JOIN template_spool_ref ON template_host_ref.template_id=template_spool_ref.template_id SET template_host_ref.physical_size=template_spool_ref.template_size; CREATE TABLE `cloud`.`user_vm_details` ( @@ -412,7 +412,7 @@ CREATE TABLE `cloud`.`vpn_users` ( CONSTRAINT `fk_vpn_users__owner_id` FOREIGN KEY (`owner_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_vpn_users__domain_id` FOREIGN KEY (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, INDEX `i_vpn_users_username`(`username`), - UNIQUE `i_vpn_users__account_id__username`(`owner_id`, `username`) + UNIQUE `i_vpn_users__account_id__username`(`owner_id`, `username`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; ALTER TABLE `cloud`.`storage_pool` ADD COLUMN `status` varchar(32); @@ -490,7 +490,7 @@ CREATE TABLE `cloud`.`usage_event` ( `resource_name` varchar(255), `offering_id` bigint unsigned, `template_id` bigint unsigned, - `size` bigint unsigned, + `size` bigint unsigned, `processed` tinyint NOT NULL default '0', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2210to2211.sql b/engine/schema/src/main/resources/META-INF/db/schema-2210to2211.sql index 01bec020d63..f352f5ef44d 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-2210to2211.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-2210to2211.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2211to2212-premium.sql b/engine/schema/src/main/resources/META-INF/db/schema-2211to2212-premium.sql index 0cb187e2b9d..eae13acc78c 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-2211to2212-premium.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-2211to2212-premium.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2211to2212.sql b/engine/schema/src/main/resources/META-INF/db/schema-2211to2212.sql index 94c3d75a29f..00d0fcaac76 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-2211to2212.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-2211to2212.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2212to2213.sql b/engine/schema/src/main/resources/META-INF/db/schema-2212to2213.sql index 2e86599f792..c69809e8205 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-2212to2213.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-2212to2213.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2213to2214.sql b/engine/schema/src/main/resources/META-INF/db/schema-2213to2214.sql index 6c0cc4b6c70..41e3944dc18 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-2213to2214.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-2213to2214.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -29,7 +29,7 @@ CREATE TABLE `cloud`.`mshost_peer` ( `peer_runid` bigint NOT NULL, `peer_state` varchar(10) NOT NULL DEFAULT 'Down', `last_update` DATETIME NULL COMMENT 'Last record update time', - + PRIMARY KEY (`id`), CONSTRAINT `fk_mshost_peer__owner_mshost` FOREIGN KEY (`owner_mshost`) REFERENCES `mshost`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_mshost_peer__peer_mshost` FOREIGN KEY (`peer_mshost`) REFERENCES `mshost`(`id`), diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2214to30-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-2214to30-cleanup.sql index c90707c75b7..844280d29b6 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-2214to30-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-2214to30-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -22,7 +22,7 @@ ALTER TABLE `cloud_usage`.`usage_network` DROP COLUMN `current_bytes_sent`; ALTER TABLE `cloud`.`template_host_ref` DROP COLUMN `pool_id`; DELETE from `cloud`.`op_host_capacity` where capacity_type in (2,4,6); -ALTER TABLE `cloud`.`vm_instance` DROP COLUMN `private_netmask`; +ALTER TABLE `cloud`.`vm_instance` DROP COLUMN `private_netmask`; ALTER TABLE `cloud`.`security_group_rule` drop foreign key `fk_security_ingress_rule___security_group_id`; ALTER TABLE `cloud`.`security_group_rule` drop foreign key `fk_security_ingress_rule___allowed_network_id`; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql b/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql index 22fda616649..8be481e4736 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-2214to30.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -54,7 +54,7 @@ CREATE TABLE `cloud`.`projects` ( PRIMARY KEY (`id`), CONSTRAINT `fk_projects__project_account_id` FOREIGN KEY(`project_account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_projects__domain_id` FOREIGN KEY(`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - INDEX `i_projects__removed`(`removed`) + INDEX `i_projects__removed`(`removed`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -177,97 +177,97 @@ ALTER TABLE `cloud`.`alert` ADD `cluster_id` bigint unsigned; ALTER TABLE `cloud`.`user_statistics` ADD COLUMN `agg_bytes_received` bigint unsigned NOT NULL default '0'; ALTER TABLE `cloud`.`user_statistics` ADD COLUMN `agg_bytes_sent` bigint unsigned NOT NULL default '0'; -ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`vm_instance` ADD CONSTRAINT `uc_vm_instance_uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`async_job` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`async_job` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`async_job` ADD CONSTRAINT `uc_async__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`domain` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`domain` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`domain` ADD CONSTRAINT `uc_domain__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`account` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`account` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`account` ADD CONSTRAINT `uc_account__uuid` UNIQUE (`uuid`); ALTER TABLE `cloud_usage`.`account` ADD COLUMN `uuid` varchar(40); -ALTER TABLE `cloud`.`user` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`user` ADD 
COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`user` ADD CONSTRAINT `uc_user__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`projects` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`projects` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`projects` ADD CONSTRAINT `uc_projects__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`data_center` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`data_center` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`data_center` ADD CONSTRAINT `uc_data_center__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`host` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`host` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`host` ADD CONSTRAINT `uc_host__uuid` UNIQUE (`uuid`); ALTER TABLE `cloud`.`host` ADD COLUMN `update_count` bigint unsigned NOT NULL DEFAULT 0 COMMENT 'atomic increase count making status update operation atomical'; -ALTER TABLE `cloud`.`vm_template` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`vm_template` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`vm_template` ADD CONSTRAINT `uc_vm_template__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`disk_offering` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`disk_offering` ADD CONSTRAINT `uc_disk_offering__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`networks` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`networks` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`networks` ADD CONSTRAINT `uc_networks__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`security_group` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`security_group` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`security_group` ADD CONSTRAINT `uc_security_group__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`instance_group` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`instance_group` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`instance_group` ADD CONSTRAINT `uc_instance_group__uuid` 
UNIQUE (`uuid`); -ALTER TABLE `cloud`.`host_pod_ref` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`host_pod_ref` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`host_pod_ref` ADD CONSTRAINT `uc_host_pod_ref__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`snapshots` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`snapshots` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`snapshots` ADD CONSTRAINT `uc_snapshots__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`snapshot_policy` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`snapshot_policy` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`snapshot_policy` ADD CONSTRAINT `uc_snapshot_policy__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`snapshot_schedule` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`snapshot_schedule` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`snapshot_schedule` ADD CONSTRAINT `uc_snapshot_schedule__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`volumes` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`volumes` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`volumes` ADD CONSTRAINT `uc_volumes__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`vlan` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`vlan` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`vlan` ADD CONSTRAINT `uc_vlan__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`user_ip_address` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`user_ip_address` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`user_ip_address` ADD CONSTRAINT `uc_user_ip_address__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`firewall_rules` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`firewall_rules` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`firewall_rules` ADD CONSTRAINT `uc_firewall_rules__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`cluster` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`cluster` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`cluster` ADD CONSTRAINT `uc_cluster__uuid` UNIQUE 
(`uuid`); -ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`network_offerings` ADD CONSTRAINT `uc_network_offerings__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`hypervisor_capabilities` ADD CONSTRAINT `uc_hypervisor_capabilities__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`vpn_users` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`vpn_users` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`vpn_users` ADD CONSTRAINT `uc_vpn_users__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`event` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`event` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`event` ADD CONSTRAINT `uc_event__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`alert` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`alert` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`alert` ADD CONSTRAINT `uc_alert__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`guest_os` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`guest_os` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`guest_os` ADD CONSTRAINT `uc_guest_os__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`guest_os_category` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`guest_os_category` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`guest_os_category` ADD CONSTRAINT `uc_guest_os_category__uuid` UNIQUE (`uuid`); -ALTER TABLE `cloud`.`nics` ADD COLUMN `uuid` varchar(40); +ALTER TABLE `cloud`.`nics` ADD COLUMN `uuid` varchar(40); ALTER TABLE `cloud`.`nics` ADD CONSTRAINT `uc_nics__uuid` UNIQUE (`uuid`); ALTER TABLE `cloud`.`op_host_capacity` ADD COLUMN `created` datetime; @@ -304,7 +304,7 @@ ALTER TABLE `cloud_usage`.`usage_ip_address` ADD COLUMN `is_system` smallint(1) INSERT IGNORE INTO `cloud`.`configuration` VALUES 
('Premium', 'DEFAULT', 'management-server', 'usage.sanity.check.interval', null, 'Interval (in days) to check sanity of usage data'); DELETE FROM `cloud`.`configuration` WHERE name='host.capacity.checker.wait'; -DELETE FROM `cloud`.`configuration` WHERE name='host.capacity.checker.interval'; +DELETE FROM `cloud`.`configuration` WHERE name='host.capacity.checker.interval'; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'disable.extraction' , 'false', 'Flag for disabling extraction of template, isos and volumes'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.check.interval' , '30', 'Interval (in seconds) to report redundant router status.'); @@ -362,9 +362,9 @@ CREATE TABLE `cloud`.`physical_network` ( `name` varchar(255) NOT NULL, `data_center_id` bigint unsigned NOT NULL COMMENT 'data center id that this physical network belongs to', `vnet` varchar(255), - `speed` varchar(32), + `speed` varchar(32), `domain_id` bigint unsigned COMMENT 'foreign key to domain id', - `broadcast_domain_range` varchar(32) NOT NULL DEFAULT 'POD' COMMENT 'range of broadcast domain : POD/ZONE', + `broadcast_domain_range` varchar(32) NOT NULL DEFAULT 'POD' COMMENT 'range of broadcast domain : POD/ZONE', `state` varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'what state is this configuration in', `created` datetime COMMENT 'date created', `removed` datetime COMMENT 'date removed if not null', @@ -372,7 +372,7 @@ CREATE TABLE `cloud`.`physical_network` ( CONSTRAINT `fk_physical_network__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_physical_network__domain_id` FOREIGN KEY(`domain_id`) REFERENCES `domain`(`id`), CONSTRAINT `uc_physical_networks__uuid` UNIQUE (`uuid`), - INDEX `i_physical_network__removed`(`removed`) + INDEX `i_physical_network__removed`(`removed`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE TABLE 
`cloud`.`physical_network_tags` ( @@ -690,7 +690,7 @@ CREATE TABLE `cloud_usage`.`usage_security_group` ( `vm_instance_id` bigint unsigned NOT NULL, `security_group_id` bigint unsigned NOT NULL, `created` DATETIME NOT NULL, - `deleted` DATETIME NULL + `deleted` DATETIME NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8; ALTER TABLE `cloud_usage`.`usage_security_group` ADD INDEX `i_usage_security_group__account_id`(`account_id`); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-221to222-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-221to222-cleanup.sql index d999b939103..5ed5f834f68 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-221to222-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-221to222-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-221to222-premium.sql b/engine/schema/src/main/resources/META-INF/db/schema-221to222-premium.sql index 5477fd8a348..01603a40658 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-221to222-premium.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-221to222-premium.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-221to222.sql b/engine/schema/src/main/resources/META-INF/db/schema-221to222.sql index 0c663b1ca03..d07b71fb6cb 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-221to222.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-221to222.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -23,7 +23,7 @@ update network_offerings set firewall_service=1, lb_service=1,vpn_service=1,gate alter table domain add column `state` char(32) NOT NULL default 'Active' COMMENT 'state of the domain'; alter table nics add column `vm_type` char(32); update nics set vm_type=(select type from vm_instance where vm_instance.id=nics.instance_id); -INSERT INTO configuration (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Network','DEFAULT','none','network.guest.cidr.limit','22','size limit for guest cidr; cant be less than this value'); +INSERT INTO configuration (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Network','DEFAULT','none','network.guest.cidr.limit','22','size limit for guest cidr; cant be less than this value'); alter table user_statistics add column `network_id` bigint unsigned; update op_networks set nics_count=(nics_count-1) where id in (select d.network_id from 
domain_router d, vm_instance i where i.state='Running' and i.id=d.id); update network_offerings set traffic_type='Guest' where system_only=0; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-222to224-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-222to224-cleanup.sql index 1bcd5d4c5f0..31f6eefb5fa 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-222to224-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-222to224-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-222to224-premium.sql b/engine/schema/src/main/resources/META-INF/db/schema-222to224-premium.sql index 9a5f62794c4..33d954d3a88 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-222to224-premium.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-222to224-premium.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-222to224.sql b/engine/schema/src/main/resources/META-INF/db/schema-222to224.sql index 8be64169b44..abbaf447bba 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-222to224.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-222to224.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -99,7 +99,7 @@ ALTER TABLE `cloud`.`op_host_capacity` MODIFY `used_capacity` bigint signed NOT ALTER TABLE `cloud`.`op_host_capacity` MODIFY `reserved_capacity` bigint signed NOT NULL; ALTER TABLE `cloud`.`op_host_capacity` MODIFY `total_capacity` bigint signed NOT NULL; -INSERT IGNORE INTO `cloud`.`configuration` VALUES +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced','DEFAULT','management-server','control.cidr','169.254.0.0/16','Changes the cidr for the control network traffic. Defaults to using link local. 
Must be unique within pods'), ('Advanced','DEFAULT','management-server','control.gateway','169.254.0.1','gateway for the control network traffic'), ('Advanced','DEFAULT','AgentManager','cmd.wait','7200','Time (in seconds) to wait for some heavy time-consuming commands'), @@ -110,7 +110,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Console Proxy','DEFAULT','AgentManager','consoleproxy.url.domain','realhostip.com','Console proxy url domain'), ('Advanced','DEFAULT','management-server','extract.url.cleanup.interval','120','The interval (in seconds) to wait before cleaning up the extract URL\'s '), ('Network','DEFAULT','AgentManager','guest.ip.network','10.1.1.1','The network address of the guest virtual network. Virtual machines will be assigned an IP in this subnet.'), -('Network','DEFAULT','AgentManager','guest.netmask','255.255.255.0','The netmask of the guest virtual network.'), +('Network','DEFAULT','AgentManager','guest.netmask','255.255.255.0','The netmask of the guest virtual network.'), ('Network','DEFAULT','management-server','guest.vlan.bits','12','The number of bits to reserve for the VLAN identifier in the guest subnet.'), ('Advanced','DEFAULT','management-server','host.capacity.checker.interval','3600','Time (in seconds) to wait before recalculating host\'s capacity'), ('Advanced','DEFAULT','management-server','host.capacity.checker.wait','3600','Time (in seconds) to wait before starting host capacity background checker'), @@ -175,7 +175,7 @@ ALTER TABLE `cloud`.`snapshot_schedule` ADD UNIQUE KEY `volume_id` (`volume_id` ALTER TABLE `cloud`.`storage_pool` MODIFY COLUMN `uuid` varchar(255) UNIQUE; ALTER TABLE `cloud`.`user_statistics` DROP KEY `account_id`; -ALTER TABLE `cloud`.`user_statistics` ADD UNIQUE KEY `account_id` (`account_id`,`data_center_id`, `public_ip_address`, `device_id`,`device_type`); +ALTER TABLE `cloud`.`user_statistics` ADD UNIQUE KEY `account_id` (`account_id`,`data_center_id`, `public_ip_address`, 
`device_id`,`device_type`); ALTER TABLE `cloud`.`usage_event` ADD INDEX `i_usage_event__created`(`created`); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-224to225-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-224to225-cleanup.sql index b018d7f280e..5f24290a1fd 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-224to225-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-224to225-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-224to225.sql b/engine/schema/src/main/resources/META-INF/db/schema-224to225.sql index 65334af306f..735c30a943c 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-224to225.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-224to225.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-225to226.sql b/engine/schema/src/main/resources/META-INF/db/schema-225to226.sql index ec1baae2e69..3527921af84 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-225to226.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-225to226.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-227to228-premium.sql b/engine/schema/src/main/resources/META-INF/db/schema-227to228-premium.sql index 40fcbfa6838..2e21f0c249e 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-227to228-premium.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-227to228-premium.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql b/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql index 343c7663fd2..ac3c014894d 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-227to228.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -136,9 +136,9 @@ ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `vm_type` varchar(32) NOT NULL; UPDATE vm_instance set vm_type=type; ALTER TABLE `cloud`.`networks` ADD COLUMN `is_domain_specific` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if network is domain specific, 0 false otherwise'; -INSERT INTO configuration (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'allow.subdomain.network.access', 'true', 'Allow subdomains to use networks dedicated to their parent domain(s)'); +INSERT INTO configuration (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'allow.subdomain.network.access', 'true', 'Allow subdomains to use networks dedicated to their parent domain(s)'); -INSERT INTO configuration (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT', 
'management-server', 'encode.api.response', 'false', 'Do UTF-8 encoding for the api response, false by default'); +INSERT INTO configuration (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT', 'management-server', 'encode.api.response', 'false', 'Do UTF-8 encoding for the api response, false by default'); DELETE FROM load_balancer_vm_map WHERE instance_id IN (SELECT id FROM vm_instance WHERE removed IS NOT NULL); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-228to229.sql b/engine/schema/src/main/resources/META-INF/db/schema-228to229.sql index 9d5baa4c403..2496dd4c472 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-228to229.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-228to229.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-229to2210.sql b/engine/schema/src/main/resources/META-INF/db/schema-229to2210.sql index 9c5c46242af..1d2980f6564 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-229to2210.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-229to2210.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-22beta1to22beta2.sql b/engine/schema/src/main/resources/META-INF/db/schema-22beta1to22beta2.sql index 1b7c6a64eb0..dd03e61e17e 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-22beta1to22beta2.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-22beta1to22beta2.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-22beta3to22beta4.sql b/engine/schema/src/main/resources/META-INF/db/schema-22beta3to22beta4.sql index c73d16537ac..a93d6d5d7e5 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-22beta3to22beta4.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-22beta3to22beta4.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -83,7 +83,7 @@ CREATE TABLE `cloud`.`user_vm_details` ( `value` varchar(1024) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; - + CREATE TABLE `cloud`.`cluster_details` ( `id` bigint unsigned NOT NULL auto_increment, `cluster_id` bigint unsigned NOT NULL COMMENT 'cluster id', @@ -99,9 +99,9 @@ ALTER TABLE `cloud`.`service_offering` ADD COLUMN `host_tag` varchar(255); ALTER TABLE `cloud`.`op_it_work` change created created_at bigint unsigned NOT NULL COMMENT 'when was this work detail created'; ALTER TABLE `cloud`.`op_it_work` change state step char(32) NOT NULL COMMENT 'state'; ALTER TABLE `cloud`.`op_it_work` change cancel_taken updated_at bigint unsigned NOT NULL COMMENT 'time it was taken over'; -ALTER TABLE `cloud`.`op_it_work` ADD COLUMN `instance_id` bigint unsigned NOT NULL COMMENT 'vm instance'; -ALTER TABLE `cloud`.`op_it_work` ADD COLUMN `resource_id` bigint unsigned COMMENT 'resource id being worked on'; -ALTER TABLE `cloud`.`op_it_work` ADD COLUMN `resource_type` char(32) COMMENT 'type of resource being worked on'; +ALTER TABLE `cloud`.`op_it_work` ADD COLUMN `instance_id` bigint unsigned NOT NULL COMMENT 'vm instance'; +ALTER TABLE `cloud`.`op_it_work` ADD COLUMN `resource_id` bigint unsigned COMMENT 'resource id being worked on'; +ALTER TABLE `cloud`.`op_it_work` ADD COLUMN `resource_type` char(32) COMMENT 'type of resource being worked on'; ALTER TABLE `cloud`.`hypervsior_properties` ADD COLUMN `is_default` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if network is default'; ALTER TABLE `cloud`.`network_offerings` drop column TYPE; ALTER TABLE `cloud`.`domain_router` ADD COLUMN `host_tag` varchar(255) COMMENT 'host tag specified by the 
service_offering'; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-301to302-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-301to302-cleanup.sql index 7922d98ea99..d32644f471c 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-301to302-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-301to302-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-301to302.sql b/engine/schema/src/main/resources/META-INF/db/schema-301to302.sql index 4532757d052..99a555dedca 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-301to302.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-301to302.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -41,7 +41,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Hidden', 'DEFAULT', 'managem INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'network.disable.rpfilter', 'true', 'disable rp_filter on Domain Router VM public interfaces.'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'network.securitygroups.work.cleanup.interval', '120', 'Time interval (seconds) in which finished work is cleaned up from the work table'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'network.securitygroups.work.lock.timeout', '300', 'Lock wait timeout (seconds) while updating the security group work queues'); -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'network.securitygroups.work.per.agent.queue.size', '100', +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'network.securitygroups.work.per.agent.queue.size', '100', 'The number of outstanding security group work items that can be queued to a host. If exceeded, work items will get dropped to conserve memory. 
Security Group Sync will take care of ensuring that the host gets updated eventually'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'network.securitygroups.workers.pool.size', '50', 'Number of worker threads processing the security group update work queue'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Hidden', 'DEFAULT', 'management-server', 'ovm.guest.network.device', null, 'Specify the private bridge on host for private network'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-302to303.sql b/engine/schema/src/main/resources/META-INF/db/schema-302to303.sql index b475a8e9958..07faf98cf74 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-302to303.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-302to303.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -55,7 +55,7 @@ CREATE TABLE `cloud`.`volume_host_ref` ( `local_path` varchar(255), `install_path` varchar(255), `url` varchar(255), - `format` varchar(32) NOT NULL COMMENT 'format for the volume', + `format` varchar(32) NOT NULL COMMENT 'format for the volume', `destroyed` tinyint(1) COMMENT 'indicates whether the volume_host entry was destroyed by the user or not', PRIMARY KEY (`id`), CONSTRAINT `fk_volume_host_ref__host_id` FOREIGN KEY `fk_volume_host_ref__host_id` (`host_id`) REFERENCES `host` (`id`) ON DELETE CASCADE, diff --git a/engine/schema/src/main/resources/META-INF/db/schema-302to40-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-302to40-cleanup.sql index 4d89a078b2d..540c7716189 100644 --- 
a/engine/schema/src/main/resources/META-INF/db/schema-302to40-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-302to40-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql b/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql index ca99f0106d2..47e4c3fd6d5 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-302to40.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -59,7 +59,7 @@ CREATE TABLE `cloud`.`volume_host_ref` ( `local_path` varchar(255), `install_path` varchar(255), `url` varchar(255), - `format` varchar(32) NOT NULL COMMENT 'format for the volume', + `format` varchar(32) NOT NULL COMMENT 'format for the volume', `destroyed` tinyint(1) COMMENT 'indicates whether the volume_host entry was destroyed by the user or not', PRIMARY KEY (`id`), CONSTRAINT `fk_volume_host_ref__host_id` FOREIGN KEY `fk_volume_host_ref__host_id` (`host_id`) REFERENCES `host` (`id`) ON DELETE CASCADE, @@ -236,9 +236,9 @@ from information_schema.key_column_usage A JOIN information_schema.key_column_usage B ON B.table_name = 'physical_network_service_providers' AND B.COLUMN_NAME = 'provider_name' AND A.COLUMN_NAME ='physical_network_id' AND B.CONSTRAINT_NAME=A.CONSTRAINT_NAME where A.table_name = 'physical_network_service_providers' LIMIT 1); -PREPARE stmt1 FROM @constraintname; -EXECUTE stmt1; -DEALLOCATE PREPARE stmt1; +PREPARE stmt1 FROM @constraintname; +EXECUTE stmt1; +DEALLOCATE PREPARE stmt1; AlTER TABLE `cloud`.`physical_network_service_providers` ADD CONSTRAINT `fk_pnetwork_service_providers__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE; UPDATE `cloud`.`configuration` SET description='In second, timeout for creating volume from snapshot' WHERE name='create.volume.from.snapshot.wait'; @@ -299,7 +299,7 @@ CREATE TABLE `cloud`.`vpc` ( PRIMARY KEY (`id`), INDEX `i_vpc__removed`(`removed`), CONSTRAINT `fk_vpc__zone_id` FOREIGN KEY `fk_vpc__zone_id` (`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_vpc__vpc_offering_id` FOREIGN KEY (`vpc_offering_id`) REFERENCES 
`vpc_offerings`(`id`), + CONSTRAINT `fk_vpc__vpc_offering_id` FOREIGN KEY (`vpc_offering_id`) REFERENCES `vpc_offerings`(`id`), CONSTRAINT `fk_vpc__account_id` FOREIGN KEY `fk_vpc__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_vpc__domain_id` FOREIGN KEY `fk_vpc__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -360,7 +360,7 @@ CREATE TABLE `cloud`.`static_routes` ( `id` bigint unsigned NOT NULL auto_increment COMMENT 'id', `uuid` varchar(40), `vpc_gateway_id` bigint unsigned COMMENT 'id of the corresponding ip address', - `cidr` varchar(18) COMMENT 'cidr for the static route', + `cidr` varchar(18) COMMENT 'cidr for the static route', `state` char(32) NOT NULL COMMENT 'current state of this rule', `vpc_id` bigint unsigned COMMENT 'vpc the firewall rule is associated with', `account_id` bigint unsigned NOT NULL COMMENT 'owner id', @@ -468,7 +468,7 @@ UPDATE `cloud`.`configuration` SET description='Comma separated list of cidrs in INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'site2site.vpn.vpngateway.connection.limit', '4', 'The maximum number of VPN connection per VPN gateway'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'site2site.vpn.customergateway.subnets.limit', '10', 'The maximum number of subnets per customer gateway'); -INSERT IGNORE INTO `cloud`.`guest_os_category` VALUES ('11','None',NULL); +INSERT IGNORE INTO `cloud`.`guest_os_category` VALUES ('11','None',NULL); ALTER TABLE `cloud`.`user` ADD COLUMN `incorrect_login_attempts` integer unsigned NOT NULL DEFAULT '0'; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'incorrect.login.attempts.allowed', '5', 'Incorrect login attempts allowed before the user is disabled'); UPDATE `cloud`.`configuration` set description ='Uuid of the service offering used by console 
proxy; if NULL - system offering will be used' where name ='consoleproxy.service.offering'; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-304to305-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-304to305-cleanup.sql index 3b5c8f5a356..1184c98e182 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-304to305-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-304to305-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-304to305.sql b/engine/schema/src/main/resources/META-INF/db/schema-304to305.sql index dfeff3f683b..cb2efb3edd0 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-304to305.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-304to305.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -80,7 +80,7 @@ CREATE TABLE `cloud`.`vpc` ( PRIMARY KEY (`id`), INDEX `i_vpc__removed`(`removed`), CONSTRAINT `fk_vpc__zone_id` FOREIGN KEY `fk_vpc__zone_id` (`zone_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_vpc__vpc_offering_id` FOREIGN KEY (`vpc_offering_id`) REFERENCES `vpc_offerings`(`id`), + CONSTRAINT `fk_vpc__vpc_offering_id` FOREIGN KEY (`vpc_offering_id`) REFERENCES `vpc_offerings`(`id`), CONSTRAINT `fk_vpc__account_id` FOREIGN KEY `fk_vpc__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_vpc__domain_id` FOREIGN KEY `fk_vpc__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -141,7 +141,7 @@ CREATE TABLE `cloud`.`static_routes` ( `id` bigint unsigned NOT NULL auto_increment COMMENT 'id', `uuid` varchar(40), `vpc_gateway_id` bigint unsigned COMMENT 'id of the corresponding ip address', - `cidr` varchar(18) COMMENT 'cidr for the static route', + `cidr` varchar(18) COMMENT 'cidr for the static route', `state` char(32) NOT NULL COMMENT 'current state of this rule', `vpc_id` bigint unsigned COMMENT 'vpc the firewall rule is associated with', `account_id` bigint unsigned NOT NULL COMMENT 'owner id', diff --git a/engine/schema/src/main/resources/META-INF/db/schema-305to306-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-305to306-cleanup.sql index f15ad4fbc5c..850d48b6526 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-305to306-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-305to306-cleanup.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this 
file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-305to306.sql b/engine/schema/src/main/resources/META-INF/db/schema-305to306.sql index b1294a21054..e9a620bb4cc 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-305to306.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-305to306.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-306to307.sql b/engine/schema/src/main/resources/META-INF/db/schema-306to307.sql index a43833efa43..0ddee9ec5db 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-306to307.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-306to307.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql b/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql index 944d910fec4..55d78b59437 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql @@ -22,7 +22,7 @@ SET foreign_key_checks = 0; --- DB upgrade steps from 302-40 +-- DB upgrade steps from 302-40 CREATE TABLE `cloud`.`external_nicira_nvp_devices` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', `uuid` varchar(255) UNIQUE, @@ -59,9 +59,9 @@ from information_schema.key_column_usage A JOIN information_schema.key_column_usage B ON B.table_name = 'physical_network_service_providers' AND B.COLUMN_NAME = 'provider_name' AND A.COLUMN_NAME ='physical_network_id' AND B.CONSTRAINT_NAME=A.CONSTRAINT_NAME where A.table_name = 'physical_network_service_providers' LIMIT 1); -PREPARE stmt1 FROM @constraintname; -EXECUTE stmt1; -DEALLOCATE PREPARE stmt1; +PREPARE stmt1 FROM @constraintname; +EXECUTE stmt1; +DEALLOCATE PREPARE stmt1; AlTER TABLE physical_network_service_providers ADD CONSTRAINT `fk_pnetwork_service_providers__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE; UPDATE `cloud`.`configuration` SET description='Do URL encoding for the api response, false by default' WHERE name='encode.api.response'; @@ -351,8 +351,8 @@ ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_range` varchar(255); ALTER TABLE `cloud`.`data_center` ADD COLUMN `ip6_dns1` varchar(255); ALTER TABLE `cloud`.`data_center` ADD COLUMN `ip6_dns2` varchar(255); -UPDATE `cloud`.`networks` INNER JOIN `cloud`.`vlan` ON 
networks.id = vlan.network_id -SET networks.gateway = vlan.vlan_gateway, networks.ip6_gateway = vlan.ip6_gateway, networks.ip6_cidr = vlan.ip6_cidr +UPDATE `cloud`.`networks` INNER JOIN `cloud`.`vlan` ON networks.id = vlan.network_id +SET networks.gateway = vlan.vlan_gateway, networks.ip6_gateway = vlan.ip6_gateway, networks.ip6_cidr = vlan.ip6_cidr WHERE networks.data_center_id = vlan.data_center_id AND networks.physical_network_id = vlan.physical_network_id; -- DB views for list api diff --git a/engine/schema/src/main/resources/META-INF/db/schema-30to301.sql b/engine/schema/src/main/resources/META-INF/db/schema-30to301.sql index 0cc51e7d08f..81339bf2a53 100755 --- a/engine/schema/src/main/resources/META-INF/db/schema-30to301.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-30to301.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-40to410.sql b/engine/schema/src/main/resources/META-INF/db/schema-40to410.sql index 1b3a29b27a4..845b31ca04a 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-40to410.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-40to410.sql @@ -457,15 +457,15 @@ ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_range` varchar(255); ALTER TABLE `cloud`.`data_center` ADD COLUMN `ip6_dns1` varchar(255); ALTER TABLE `cloud`.`data_center` ADD COLUMN `ip6_dns2` varchar(255); -UPDATE `cloud`.`networks` INNER JOIN `cloud`.`vlan` ON networks.id = vlan.network_id -SET networks.gateway = vlan.vlan_gateway, networks.ip6_gateway = vlan.ip6_gateway, networks.ip6_cidr = vlan.ip6_cidr 
+UPDATE `cloud`.`networks` INNER JOIN `cloud`.`vlan` ON networks.id = vlan.network_id +SET networks.gateway = vlan.vlan_gateway, networks.ip6_gateway = vlan.ip6_gateway, networks.ip6_cidr = vlan.ip6_cidr WHERE networks.data_center_id = vlan.data_center_id AND networks.physical_network_id = vlan.physical_network_id; -- DB views for list api DROP VIEW IF EXISTS `cloud`.`user_vm_view`; CREATE VIEW `cloud`.`user_vm_view` AS - select + select vm_instance.id id, vm_instance.name name, user_vm.display_name display_name, @@ -504,7 +504,7 @@ CREATE VIEW `cloud`.`user_vm_view` AS vm_instance.vm_type vm_type, data_center.id data_center_id, data_center.uuid data_center_uuid, - data_center.name data_center_name, + data_center.name data_center_name, data_center.is_security_group_enabled security_group_enabled, host.id host_id, host.uuid host_uuid, @@ -634,7 +634,7 @@ CREATE VIEW `cloud`.`user_vm_view` AS DROP VIEW IF EXISTS `cloud`.`domain_router_view`; CREATE VIEW `cloud`.`domain_router_view` AS - select + select vm_instance.id id, vm_instance.name name, account.id account_id, @@ -740,7 +740,7 @@ CREATE VIEW `cloud`.`domain_router_view` AS DROP VIEW IF EXISTS `cloud`.`security_group_view`; CREATE VIEW `cloud`.`security_group_view` AS - select + select security_group.id id, security_group.name name, security_group.description description, @@ -799,7 +799,7 @@ CREATE VIEW `cloud`.`security_group_view` AS DROP VIEW IF EXISTS `cloud`.`resource_tag_view`; CREATE VIEW `cloud`.`resource_tag_view` AS - select + select resource_tags.id, resource_tags.uuid, resource_tags.key, @@ -831,7 +831,7 @@ CREATE VIEW `cloud`.`resource_tag_view` AS DROP VIEW IF EXISTS `cloud`.`event_view`; CREATE VIEW `cloud`.`event_view` AS - select + select event.id, event.uuid, event.type, @@ -870,7 +870,7 @@ CREATE VIEW `cloud`.`event_view` AS DROP VIEW IF EXISTS `cloud`.`instance_group_view`; CREATE VIEW `cloud`.`instance_group_view` AS - select + select instance_group.id, instance_group.uuid, 
instance_group.name, @@ -898,7 +898,7 @@ CREATE VIEW `cloud`.`instance_group_view` AS DROP VIEW IF EXISTS `cloud`.`user_view`; CREATE VIEW `cloud`.`user_view` AS - select + select user.id, user.uuid, user.username, @@ -941,7 +941,7 @@ CREATE VIEW `cloud`.`user_view` AS DROP VIEW IF EXISTS `cloud`.`project_view`; CREATE VIEW `cloud`.`project_view` AS - select + select projects.id, projects.uuid, projects.name, @@ -982,7 +982,7 @@ CREATE VIEW `cloud`.`project_view` AS DROP VIEW IF EXISTS `cloud`.`project_account_view`; CREATE VIEW `cloud`.`project_account_view` AS - select + select project_account.id, account.id account_id, account.uuid account_uuid, @@ -1007,7 +1007,7 @@ CREATE VIEW `cloud`.`project_account_view` AS DROP VIEW IF EXISTS `cloud`.`project_invitation_view`; CREATE VIEW `cloud`.`project_invitation_view` AS - select + select project_invitations.id, project_invitations.uuid, project_invitations.email, @@ -1035,7 +1035,7 @@ CREATE VIEW `cloud`.`project_invitation_view` AS DROP VIEW IF EXISTS `cloud`.`host_view`; CREATE VIEW `cloud`.`host_view` AS - select + select host.id, host.uuid, host.name, @@ -1105,7 +1105,7 @@ CREATE VIEW `cloud`.`host_view` AS DROP VIEW IF EXISTS `cloud`.`volume_view`; CREATE VIEW `cloud`.`volume_view` AS - select + select volumes.id, volumes.uuid, volumes.name, @@ -1206,7 +1206,7 @@ CREATE VIEW `cloud`.`volume_view` AS DROP VIEW IF EXISTS `cloud`.`account_netstats_view`; CREATE VIEW `cloud`.`account_netstats_view` AS - SELECT + SELECT account_id, sum(net_bytes_received) + sum(current_bytes_received) as bytesReceived, sum(net_bytes_sent) + sum(current_bytes_sent) as bytesSent @@ -1217,7 +1217,7 @@ CREATE VIEW `cloud`.`account_netstats_view` AS DROP VIEW IF EXISTS `cloud`.`account_vmstats_view`; CREATE VIEW `cloud`.`account_vmstats_view` AS - SELECT + SELECT account_id, state, count(*) as vmcount from `cloud`.`vm_instance` @@ -1225,7 +1225,7 @@ CREATE VIEW `cloud`.`account_vmstats_view` AS DROP VIEW IF EXISTS `cloud`.`free_ip_view`; 
CREATE VIEW `cloud`.`free_ip_view` AS - select + select count(user_ip_address.id) free_ip from `cloud`.`user_ip_address` @@ -1237,7 +1237,7 @@ CREATE VIEW `cloud`.`free_ip_view` AS DROP VIEW IF EXISTS `cloud`.`account_view`; CREATE VIEW `cloud`.`account_view` AS - select + select account.id, account.uuid, account.account_name, @@ -1348,7 +1348,7 @@ CREATE VIEW `cloud`.`account_view` AS DROP VIEW IF EXISTS `cloud`.`async_job_view`; CREATE VIEW `cloud`.`async_job_view` AS - select + select account.id account_id, account.uuid account_uuid, account.account_name account_name, @@ -1457,7 +1457,7 @@ CREATE VIEW `cloud`.`async_job_view` AS DROP VIEW IF EXISTS `cloud`.`storage_pool_view`; CREATE VIEW `cloud`.`storage_pool_view` AS - select + select storage_pool.id, storage_pool.uuid, storage_pool.name, @@ -1475,7 +1475,7 @@ CREATE VIEW `cloud`.`storage_pool_view` AS cluster.cluster_type, data_center.id data_center_id, data_center.uuid data_center_uuid, - data_center.name data_center_name, + data_center.name data_center_name, host_pod_ref.id pod_id, host_pod_ref.uuid pod_uuid, host_pod_ref.name pod_name, @@ -1507,7 +1507,7 @@ CREATE VIEW `cloud`.`storage_pool_view` AS DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; CREATE VIEW `cloud`.`disk_offering_view` AS - select + select disk_offering.id, disk_offering.uuid, disk_offering.name, @@ -1532,7 +1532,7 @@ CREATE VIEW `cloud`.`disk_offering_view` AS DROP VIEW IF EXISTS `cloud`.`service_offering_view`; CREATE VIEW `cloud`.`service_offering_view` AS - select + select service_offering.id, disk_offering.uuid, disk_offering.name, @@ -1563,10 +1563,10 @@ CREATE VIEW `cloud`.`service_offering_view` AS `cloud`.`disk_offering` ON service_offering.id = disk_offering.id left join `cloud`.`domain` ON disk_offering.domain_id = domain.id; - + DROP VIEW IF EXISTS `cloud`.`data_center_view`; CREATE VIEW `cloud`.`data_center_view` AS - select + select data_center.id, data_center.uuid, data_center.name, @@ -1593,8 +1593,8 @@ CREATE VIEW 
`cloud`.`data_center_view` AS from `cloud`.`data_center` left join - `cloud`.`domain` ON data_center.domain_id = domain.id; - + `cloud`.`domain` ON data_center.domain_id = domain.id; + CREATE TABLE `cloud`.`baremetal_dhcp_devices` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql b/engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql index 6148ee11bcf..0b1779d7e2b 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41000to41100.sql @@ -19,51 +19,6 @@ -- Schema upgrade from 4.10.0.0 to 4.11.0.0 --; ---; --- Stored procedure to do idempotent column add; ---; -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`; - -CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` ( - IN in_table_name VARCHAR(200) - , IN in_column_name VARCHAR(200) - , IN in_column_definition VARCHAR(1000) -) -BEGIN - - DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY`; - -CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY` ( - IN in_table_name VARCHAR(200) - , IN in_foreign_key_name VARCHAR(200) -) -BEGIN - - DECLARE CONTINUE HANDLER FOR 1091 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' DROP FOREIGN KEY '); SET @ddl = CONCAT(@ddl, ' ', in_foreign_key_name); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_INDEX`; - -CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_INDEX` ( - IN in_index_name VARCHAR(200) - , IN in_table_name VARCHAR(200) -) -BEGIN - - DECLARE CONTINUE 
HANDLER FOR 1091 BEGIN END; SET @ddl = CONCAT('DROP INDEX ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', ' ON ') ; SET @ddl = CONCAT(@ddl, ' ', in_table_name); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_CREATE_UNIQUE_INDEX`; - -CREATE PROCEDURE `cloud`.`IDEMPOTENT_CREATE_UNIQUE_INDEX` ( - IN in_index_name VARCHAR(200) - , IN in_table_name VARCHAR(200) - , IN in_index_definition VARCHAR(1000) -) -BEGIN - - DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('CREATE UNIQUE INDEX ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', ' ON ') ; SET @ddl = CONCAT(@ddl, ' ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', in_index_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - -- Add For VPC flag CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.network_offerings','for_vpc', 'INT(1) NOT NULL DEFAULT 0'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql index 3556e7e1b4a..35f73b35d3c 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql @@ -113,7 +113,7 @@ CREATE TABLE `cloud`.`image_store` ( `uuid` varchar(255) COMMENT 'uuid of data store', `parent` varchar(255) COMMENT 'parent path for the storage server', `created` datetime COMMENT 'date the image store first signed on', - `removed` datetime COMMENT 'date removed if not null', + `removed` datetime COMMENT 'date removed if not null', `total_size` bigint unsigned COMMENT 'storage total size statistics', `used_bytes` bigint unsigned COMMENT 'storage available bytes statistics', PRIMARY KEY(`id`) @@ -131,7 +131,7 @@ CREATE TABLE `cloud`.`image_store_details` ( DROP VIEW IF EXISTS `cloud`.`image_store_view`; CREATE VIEW `cloud`.`image_store_view` AS - select + select image_store.id, image_store.uuid, image_store.name, @@ -153,9 
+153,9 @@ CREATE VIEW `cloud`.`image_store_view` AS left join `cloud`.`image_store_details` ON image_store_details.store_id = image_store.id; - + -- here we have to allow null for store_id to accommodate baremetal case to search for ready templates since template state is only stored in this table --- FK also commented out due to this +-- FK also commented out due to this CREATE TABLE `cloud`.`template_store_ref` ( `id` bigint unsigned NOT NULL auto_increment, `store_id` bigint unsigned, @@ -165,7 +165,7 @@ CREATE TABLE `cloud`.`template_store_ref` ( `job_id` varchar(255), `download_pct` int(10) unsigned, `size` bigint unsigned, - `store_role` varchar(255), + `store_role` varchar(255), `physical_size` bigint unsigned DEFAULT 0, `download_state` varchar(255), `error_str` varchar(255), @@ -177,7 +177,7 @@ CREATE TABLE `cloud`.`template_store_ref` ( `is_copy` tinyint(1) NOT NULL DEFAULT 0 COMMENT 'indicates whether this was copied ', `update_count` bigint unsigned, `ref_cnt` bigint unsigned DEFAULT 0, - `updated` datetime, + `updated` datetime, PRIMARY KEY (`id`), -- CONSTRAINT `fk_template_store_ref__store_id` FOREIGN KEY `fk_template_store_ref__store_id` (`store_id`) REFERENCES `image_store` (`id`) ON DELETE CASCADE, INDEX `i_template_store_ref__store_id`(`store_id`), @@ -193,7 +193,7 @@ CREATE TABLE `cloud`.`template_store_ref` ( -- ALTER TABLE `cloud`.`snapshots` DROP COLUMN `sechost_id`; -- change upload host_id FK to point to image_store table -ALTER TABLE `cloud`.`upload` DROP FOREIGN KEY `fk_upload__host_id`; +ALTER TABLE `cloud`.`upload` DROP FOREIGN KEY `fk_upload__host_id`; ALTER TABLE `cloud`.`upload` ADD CONSTRAINT `fk_upload__store_id` FOREIGN KEY(`host_id`) REFERENCES `image_store` (`id`) ON DELETE CASCADE; CREATE TABLE `cloud`.`snapshot_store_ref` ( @@ -208,11 +208,11 @@ CREATE TABLE `cloud`.`snapshot_store_ref` ( `physical_size` bigint unsigned DEFAULT 0, `parent_snapshot_id` bigint unsigned DEFAULT 0, `install_path` varchar(255), - `state` 
varchar(255) NOT NULL, - -- `removed` datetime COMMENT 'date removed if not null', + `state` varchar(255) NOT NULL, + -- `removed` datetime COMMENT 'date removed if not null', `update_count` bigint unsigned, `ref_cnt` bigint unsigned, - `updated` datetime, + `updated` datetime, `volume_id` bigint unsigned, PRIMARY KEY (`id`), INDEX `i_snapshot_store_ref__store_id`(`store_id`), @@ -238,11 +238,11 @@ CREATE TABLE `cloud`.`volume_store_ref` ( `install_path` varchar(255), `url` varchar(255), `download_url` varchar(255), - `state` varchar(255) NOT NULL, + `state` varchar(255) NOT NULL, `destroyed` tinyint(1) COMMENT 'indicates whether the volume_host entry was destroyed by the user or not', `update_count` bigint unsigned, `ref_cnt` bigint unsigned, - `updated` datetime, + `updated` datetime, PRIMARY KEY (`id`), CONSTRAINT `fk_volume_store_ref__store_id` FOREIGN KEY `fk_volume_store_ref__store_id` (`store_id`) REFERENCES `image_store` (`id`) ON DELETE CASCADE, INDEX `i_volume_store_ref__store_id`(`store_id`), @@ -662,12 +662,12 @@ ALTER TABLE `cloud`.`remote_access_vpn` ADD COLUMN `id` bigint unsigned NOT NULL ALTER TABLE `cloud`.`remote_access_vpn` ADD COLUMN `uuid` varchar(40) UNIQUE; -- START: support for LXC - + INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES (UUID(), 'LXC', 'default', 50, 1); ALTER TABLE `cloud`.`physical_network_traffic_types` ADD COLUMN `lxc_network_label` varchar(255) DEFAULT 'cloudbr0' COMMENT 'The network name label of the physical device dedicated to this traffic on a LXC host'; - + UPDATE configuration SET value='KVM,XenServer,VMware,BareMetal,Ovm,LXC' WHERE name='hypervisor.list'; - + INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) VALUES (10, UUID(), 'routing-10', 'SystemVM 
Template (LXC)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloudstack.org/templates/acton/acton-systemvm-02062012.qcow2.bz2', '2755de1f9ef2ce4d6f2bee2efbb4da92', 0, 'SystemVM Template (LXC)', 'QCOW2', 15, 0, 1, 'LXC'); @@ -717,10 +717,10 @@ CREATE TABLE `cloud`.`service_offering_details` ( CONSTRAINT `fk_service_offering_details__service_offering_id` FOREIGN KEY (`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE, CONSTRAINT UNIQUE KEY `uk_service_offering_id_name` (`service_offering_id`, `name`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; - + DROP VIEW IF EXISTS `cloud`.`user_vm_view`; CREATE VIEW `cloud`.`user_vm_view` AS - select + select vm_instance.id id, vm_instance.name name, user_vm.display_name display_name, @@ -898,7 +898,7 @@ CREATE VIEW `cloud`.`user_vm_view` AS DROP VIEW IF EXISTS `cloud`.`affinity_group_view`; CREATE VIEW `cloud`.`affinity_group_view` AS - select + select affinity_group.id id, affinity_group.name name, affinity_group.type type, @@ -933,7 +933,7 @@ CREATE VIEW `cloud`.`affinity_group_view` AS DROP VIEW IF EXISTS `cloud`.`host_view`; CREATE VIEW `cloud`.`host_view` AS - select + select host.id, host.uuid, host.name, @@ -1001,10 +1001,10 @@ CREATE VIEW `cloud`.`host_view` AS `cloud`.`async_job` ON async_job.instance_id = host.id and async_job.instance_type = 'Host' and async_job.job_status = 0; - + DROP VIEW IF EXISTS `cloud`.`storage_pool_view`; CREATE VIEW `cloud`.`storage_pool_view` AS - select + select storage_pool.id, storage_pool.uuid, storage_pool.name, @@ -1024,7 +1024,7 @@ CREATE VIEW `cloud`.`storage_pool_view` AS cluster.cluster_type, data_center.id data_center_id, data_center.uuid data_center_uuid, - data_center.name data_center_name, + data_center.name data_center_name, data_center.networktype data_center_type, host_pod_ref.id pod_id, host_pod_ref.uuid pod_uuid, @@ -1054,11 +1054,11 @@ CREATE VIEW `cloud`.`storage_pool_view` AS `cloud`.`async_job` ON async_job.instance_id = storage_pool.id and 
async_job.instance_type = 'StoragePool' and async_job.job_status = 0; - + DROP VIEW IF EXISTS `cloud`.`domain_router_view`; CREATE VIEW `cloud`.`domain_router_view` AS - select + select vm_instance.id id, vm_instance.name name, account.id account_id, @@ -1157,7 +1157,7 @@ CREATE VIEW `cloud`.`domain_router_view` AS `cloud`.`async_job` ON async_job.instance_id = vm_instance.id and async_job.instance_type = 'DomainRouter' and async_job.job_status = 0; - + CREATE TABLE `cloud`.`external_cisco_vnmc_devices` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', `uuid` varchar(255) UNIQUE, @@ -1242,7 +1242,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'manag DROP VIEW IF EXISTS `cloud`.`service_offering_view`; CREATE VIEW `cloud`.`service_offering_view` AS - select + select service_offering.id, disk_offering.uuid, disk_offering.name, @@ -1289,7 +1289,7 @@ UPDATE `cloud_usage`.`account` SET `default`=1 WHERE id IN (1,2); UPDATE `cloud`.`user` SET `cloud`.`user`.`default`=1 WHERE id IN (1,2); CREATE OR REPLACE VIEW `cloud`.`user_view` AS - select + select user.id, user.uuid, user.username, @@ -1329,7 +1329,7 @@ CREATE OR REPLACE VIEW `cloud`.`user_view` AS `cloud`.`async_job` ON async_job.instance_id = user.id and async_job.instance_type = 'User' and async_job.job_status = 0; - + DROP VIEW IF EXISTS `cloud`.`account_view`; CREATE VIEW `cloud`.`account_view` AS @@ -1879,7 +1879,7 @@ ALTER TABLE `cloud`.`account_details` MODIFY value varchar(255); DROP VIEW IF EXISTS `cloud`.`template_view`; CREATE VIEW `cloud`.`template_view` AS - select + select vm_template.id, vm_template.uuid, vm_template.unique_name, @@ -1920,7 +1920,7 @@ CREATE VIEW `cloud`.`template_view` AS domain.path domain_path, projects.id project_id, projects.uuid project_uuid, - projects.name project_name, + projects.name project_name, data_center.id data_center_id, data_center.uuid data_center_uuid, data_center.name data_center_name, @@ -1950,23 +1950,23 @@ CREATE VIEW 
`cloud`.`template_view` AS from `cloud`.`vm_template` inner join - `cloud`.`guest_os` ON guest_os.id = vm_template.guest_os_id + `cloud`.`guest_os` ON guest_os.id = vm_template.guest_os_id inner join `cloud`.`account` ON account.id = vm_template.account_id inner join `cloud`.`domain` ON domain.id = account.domain_id left join - `cloud`.`projects` ON projects.project_account_id = account.id + `cloud`.`projects` ON projects.project_account_id = account.id left join - `cloud`.`vm_template_details` ON vm_template_details.template_id = vm_template.id + `cloud`.`vm_template_details` ON vm_template_details.template_id = vm_template.id left join - `cloud`.`vm_template` source_template ON source_template.id = vm_template.source_template_id + `cloud`.`vm_template` source_template ON source_template.id = vm_template.source_template_id left join `cloud`.`template_store_ref` ON template_store_ref.template_id = vm_template.id and template_store_ref.store_role = 'Image' left join - `cloud`.`image_store` ON image_store.removed is NULL AND template_store_ref.store_id is not NULL AND image_store.id = template_store_ref.store_id + `cloud`.`image_store` ON image_store.removed is NULL AND template_store_ref.store_id is not NULL AND image_store.id = template_store_ref.store_id left join - `cloud`.`template_zone_ref` ON template_zone_ref.template_id = vm_template.id AND template_store_ref.store_id is NULL AND template_zone_ref.removed is null + `cloud`.`template_zone_ref` ON template_zone_ref.template_id = vm_template.id AND template_store_ref.store_id is NULL AND template_zone_ref.removed is null left join `cloud`.`data_center` ON (image_store.data_center_id = data_center.id OR template_zone_ref.zone_id = data_center.id) left join @@ -1974,7 +1974,7 @@ CREATE VIEW `cloud`.`template_view` AS left join `cloud`.`resource_tags` ON resource_tags.resource_id = vm_template.id and (resource_tags.resource_type = 'Template' or resource_tags.resource_type='ISO'); - + INSERT IGNORE INTO 
`cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'midonet.apiserver.address', 'http://localhost:8081', 'Specify the address at which the Midonet API server can be contacted (if using Midonet)'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'management-server', 'midonet.providerrouter.id', 'd7c5e6a3-e2f4-426b-b728-b7ce6a0448e5', 'Specifies the UUID of the Midonet provider router (if using Midonet)'); @@ -1996,7 +1996,7 @@ CREATE TABLE `cloud`.`account_vnet_map` ( ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD COLUMN account_vnet_map_id bigint unsigned; ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD CONSTRAINT `fk_op_dc_vnet_alloc__account_vnet_map_id` FOREIGN KEY `fk_op_dc_vnet_alloc__account_vnet_map_id` (`account_vnet_map_id`) REFERENCES `account_vnet_map` (`id`); - + update `cloud`.`vm_template` set state='Allocated' where state is NULL; update `cloud`.`vm_template` set update_count=0 where update_count is NULL; @@ -2100,7 +2100,7 @@ CREATE TABLE `cloud`.`vm_disk_statistics` ( CONSTRAINT `fk_vm_disk_statistics__account_id` FOREIGN KEY (`account_id`) REFERENCES `account` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB AUTO_INCREMENT=0 DEFAULT CHARSET=utf8; -insert into `cloud`.`vm_disk_statistics`(data_center_id,account_id,vm_id,volume_id) +insert into `cloud`.`vm_disk_statistics`(data_center_id,account_id,vm_id,volume_id) select volumes.data_center_id, volumes.account_id, vm_instance.id, volumes.id from volumes,vm_instance where vm_instance.vm_type="User" and vm_instance.state<>"Expunging" and volumes.instance_id=vm_instance.id order by vm_instance.id; DROP TABLE IF EXISTS `cloud`.`ovs_providers`; @@ -2166,7 +2166,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'manag INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.disk.throttling.bytes_write_rate', 0, 'Default disk I/O write rate in bytes per second allowed in User vm\'s disk. 
'); -- Re-enable foreign key checking, at the end of the upgrade path -SET foreign_key_checks = 1; +SET foreign_key_checks = 1; UPDATE `cloud`.`snapshot_policy` set uuid=id WHERE uuid is NULL; #update shared sg enabled network with not null name in Advance Security Group enabled network @@ -2220,7 +2220,7 @@ CREATE TABLE `cloud`.`external_stratosphere_ssp_credentials` ( DROP VIEW IF EXISTS `cloud`.`project_view`; CREATE VIEW `cloud`.`project_view` AS - select + select projects.id, projects.uuid, projects.name, @@ -2264,7 +2264,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network', 'DEFAULT', 'manage ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `concurrent_connections` int(10) unsigned COMMENT 'Load Balancer(haproxy) maximum number of concurrent connections(global max)'; - + ALTER TABLE `cloud`.`sync_queue` MODIFY `queue_size` smallint(6) NOT NULL DEFAULT '0' COMMENT 'number of items being processed by the queue'; ALTER TABLE `cloud`.`sync_queue` MODIFY `queue_size_limit` smallint(6) NOT NULL DEFAULT '1' COMMENT 'max number of items the queue can process concurrently'; @@ -2280,7 +2280,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'manag #update the account_vmstats_view - count only user vms DROP VIEW IF EXISTS `cloud`.`account_vmstats_view`; CREATE VIEW `cloud`.`account_vmstats_view` AS - SELECT + SELECT account_id, state, count(*) as vmcount from `cloud`.`vm_instance` @@ -2329,7 +2329,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Advanced", 'DEFAULT', 'manag DROP VIEW IF EXISTS `cloud`.`data_center_view`; CREATE VIEW `cloud`.`data_center_view` AS - select + select data_center.id, data_center.uuid, data_center.name, diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql b/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql index fbbf0a2aef8..603e7712ebc 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql +++ 
b/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql @@ -19,7 +19,7 @@ -- Schema upgrade from 4.13.1.0 to 4.14.0.0 --; --- Update the description to indicate this only works with KVM + Ceph +-- Update the description to indicate this only works with KVM + Ceph -- (not implemented properly atm for KVM+NFS/local, and it accidentally works with XS + NFS. Not applicable for VMware) UPDATE `cloud`.`configuration` SET `description`='Indicates whether to always backup primary storage snapshot to secondary storage. Keeping snapshots only on Primary storage is applicable for KVM + Ceph only.' WHERE `name`='snapshot.backup.to.secondary'; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql index a414f244b08..2464a8a57ce 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql @@ -123,20 +123,6 @@ CREATE VIEW `cloud`.`service_offering_view` AS GROUP BY `service_offering`.`id`; ---; --- Stored procedure to do idempotent column add; --- This is copied from schema-41000to41100.sql ---; -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`; - -CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` ( - IN in_table_name VARCHAR(200), - IN in_column_name VARCHAR(200), - IN in_column_definition VARCHAR(1000) -) -BEGIN - - DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.account','created', 'datetime DEFAULT NULL COMMENT ''date created'' AFTER `state` '); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.domain','created', 'datetime DEFAULT NULL COMMENT ''date created'' AFTER 
`next_child_seq` '); @@ -730,39 +716,6 @@ ALTER TABLE `cloud`.`annotations` ADD COLUMN `admins_only` tinyint(1) unsigned N -- Add uuid for ssh keypairs ALTER TABLE `cloud`.`ssh_keypairs` ADD COLUMN `uuid` varchar(40) AFTER `id`; --- PR#4699 Drop the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` if it already exist. -DROP PROCEDURE IF EXISTS `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING`; - --- PR#4699 Create the procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` to add guest_os and guest_os_hypervisor mapping. -CREATE PROCEDURE `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` ( - IN guest_os_category_id bigint(20) unsigned, - IN guest_os_display_name VARCHAR(255), - IN guest_os_hypervisor_hypervisor_type VARCHAR(32), - IN guest_os_hypervisor_hypervisor_version VARCHAR(32), - IN guest_os_hypervisor_guest_os_name VARCHAR(255) -) -BEGIN - INSERT INTO cloud.guest_os (uuid, category_id, display_name, created) - SELECT UUID(), guest_os_category_id, guest_os_display_name, now() - FROM DUAL - WHERE not exists( SELECT 1 - FROM cloud.guest_os - WHERE cloud.guest_os.category_id = guest_os_category_id - AND cloud.guest_os.display_name = guest_os_display_name) - -; INSERT INTO cloud.guest_os_hypervisor (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created) - SELECT UUID(), guest_os_hypervisor_hypervisor_type, guest_os_hypervisor_hypervisor_version, guest_os_hypervisor_guest_os_name, guest_os.id, now() - FROM cloud.guest_os - WHERE guest_os.category_id = guest_os_category_id - AND guest_os.display_name = guest_os_display_name - AND NOT EXISTS (SELECT 1 - FROM cloud.guest_os_hypervisor as hypervisor - WHERE hypervisor_type = guest_os_hypervisor_hypervisor_type - AND hypervisor_version = guest_os_hypervisor_hypervisor_version - AND hypervisor.guest_os_id = guest_os.id - AND hypervisor.guest_os_name = guest_os_hypervisor_guest_os_name) -;END; - -- PR#4699 Call procedure `ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` to add new data to guest_os and guest_os_hypervisor. 
CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 20.04 LTS', 'KVM', 'default', 'Ubuntu 20.04 LTS'); CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (10, 'Ubuntu 21.04', 'KVM', 'default', 'Ubuntu 21.04'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41600to41610.sql b/engine/schema/src/main/resources/META-INF/db/schema-41600to41610.sql index d53e2181cef..2a2ae668dae 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41600to41610.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41600to41610.sql @@ -21,60 +21,6 @@ ALTER TABLE `cloud`.`vm_work_job` ADD COLUMN `secondary_object` char(100) COMMENT 'any additional item that must be checked during queueing' AFTER `vm_instance_id`; --- Stored procedures to handle cloud and cloud_schema changes - --- Idempotent ADD COLUMN -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`; -CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` ( - IN in_table_name VARCHAR(200) -, IN in_column_name VARCHAR(200) -, IN in_column_definition VARCHAR(1000) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - --- Idempotent ADD COLUMN -DROP PROCEDURE IF EXISTS `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`; -CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_ADD_COLUMN` ( - IN in_table_name VARCHAR(200) -, IN in_column_name VARCHAR(200) -, IN in_column_definition VARCHAR(1000) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - --- Idempotent DROP INDEX -DROP PROCEDURE IF EXISTS 
`cloud_usage`.`IDEMPOTENT_DROP_INDEX`; -CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_DROP_INDEX` ( - IN in_index_name VARCHAR(200) -, IN in_table_name VARCHAR(200) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1091 BEGIN END; SET @ddl = CONCAT('DROP INDEX ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', ' ON ') ; SET @ddl = CONCAT(@ddl, ' ', in_table_name); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - --- Idempotent ADD UNIQUE INDEX -DROP PROCEDURE IF EXISTS `cloud_usage`.`IDEMPOTENT_ADD_UNIQUE_INDEX`; -CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_ADD_UNIQUE_INDEX` ( - IN in_table_name VARCHAR(200) -, IN in_index_name VARCHAR(200) -, IN in_index_definition VARCHAR(1000) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD UNIQUE INDEX ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', in_index_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - --- Idempotent CHANGE COLUMN -DROP PROCEDURE IF EXISTS `cloud_usage`.`IDEMPOTENT_CHANGE_COLUMN`; -CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_CHANGE_COLUMN` ( - IN in_table_name VARCHAR(200) -, IN in_old_column_name VARCHAR(200) -, IN in_new_column_name VARCHAR(200) -, IN in_column_definition VARCHAR(1000) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' CHANGE COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_old_column_name); SET @ddl = CONCAT(@ddl, ' ', in_new_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - --- Invoke stored procedures to add primary keys on missing tables - -- Add PK to cloud.op_user_stats_log CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.op_user_stats_log', 'id', 'BIGINT(20) NOT NULL AUTO_INCREMENT FIRST, ADD PRIMARY KEY (`id`)'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41610to41700.sql 
b/engine/schema/src/main/resources/META-INF/db/schema-41610to41700.sql index 8417ec29640..ca07c25aaa5 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41610to41700.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41610to41700.sql @@ -219,21 +219,6 @@ CREATE VIEW `cloud`.`service_offering_view` AS `service_offering`.`id`; ---; --- Stored procedure to do idempotent column add; --- This is copied from schema-41000to41100.sql ---; -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`; - -CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` ( - IN in_table_name VARCHAR(200), - IN in_column_name VARCHAR(200), - IN in_column_definition VARCHAR(1000) -) -BEGIN - - DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes','external_uuid', 'VARCHAR(40) DEFAULT null '); DROP VIEW IF EXISTS `cloud`.`volume_view`; @@ -937,35 +922,5 @@ INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(uuid, hypervisor_type, hype -- Copy XenServer 8.2.0 hypervisor guest OS mappings to XenServer 8.2.1 INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'Xenserver', '8.2.1', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='Xenserver' AND hypervisor_version='8.2.0'; -DROP PROCEDURE IF EXISTS `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING`; -CREATE PROCEDURE `cloud`.`ADD_GUEST_OS_AND_HYPERVISOR_MAPPING` ( - IN guest_os_category_id bigint(20) unsigned, - IN guest_os_display_name VARCHAR(255), - IN guest_os_hypervisor_hypervisor_type VARCHAR(32), - IN guest_os_hypervisor_hypervisor_version VARCHAR(32), - IN 
guest_os_hypervisor_guest_os_name VARCHAR(255) - ) -BEGIN -INSERT INTO cloud.guest_os (uuid, category_id, display_name, created) -SELECT UUID(), guest_os_category_id, guest_os_display_name, now() -FROM DUAL -WHERE not exists( SELECT 1 - FROM cloud.guest_os - WHERE cloud.guest_os.category_id = guest_os_category_id - AND cloud.guest_os.display_name = guest_os_display_name) - -; INSERT INTO cloud.guest_os_hypervisor (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created) - SELECT UUID(), guest_os_hypervisor_hypervisor_type, guest_os_hypervisor_hypervisor_version, guest_os_hypervisor_guest_os_name, guest_os.id, now() - FROM cloud.guest_os - WHERE guest_os.category_id = guest_os_category_id - AND guest_os.display_name = guest_os_display_name - AND NOT EXISTS (SELECT 1 - FROM cloud.guest_os_hypervisor as hypervisor - WHERE hypervisor_type = guest_os_hypervisor_hypervisor_type - AND hypervisor_version = guest_os_hypervisor_hypervisor_version - AND hypervisor.guest_os_id = guest_os.id - AND hypervisor.guest_os_name = guest_os_hypervisor_guest_os_name) -;END; - CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 11 (64-bit)', 'XenServer', '8.2.1', 'Debian Bullseye 11'); CALL ADD_GUEST_OS_AND_HYPERVISOR_MAPPING (2, 'Debian GNU/Linux 11 (32-bit)', 'XenServer', '8.2.1', 'Debian Bullseye 11'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41720to41800.sql b/engine/schema/src/main/resources/META-INF/db/schema-41720to41800.sql index c51d5a43045..9f38de11819 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41720to41800.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41720to41800.sql @@ -214,16 +214,6 @@ CREATE VIEW `cloud`.`domain_router_view` AS and async_job.instance_type = 'DomainRouter' and async_job.job_status = 0; --- Idempotent ADD COLUMN -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`; -CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` ( - IN in_table_name VARCHAR(200) -, IN 
in_column_name VARCHAR(200) -, IN in_column_definition VARCHAR(1000) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - -- Add passphrase table CREATE TABLE IF NOT EXISTS `cloud`.`passphrase` ( `id` bigint unsigned NOT NULL auto_increment, @@ -433,45 +423,6 @@ WHERE roles.role_type != 'Admin' AND roles.is_default = 1 AND role_perm.rule = ' -- VM autoscaling --- Idempotent ADD COLUMN -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_COLUMN`; -CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_COLUMN` ( - IN in_table_name VARCHAR(200) -, IN in_column_name VARCHAR(200) -, IN in_column_definition VARCHAR(1000) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - --- Idempotent RENAME COLUMN -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_CHANGE_COLUMN`; -CREATE PROCEDURE `cloud`.`IDEMPOTENT_CHANGE_COLUMN` ( - IN in_table_name VARCHAR(200) -, IN in_column_name VARCHAR(200) -, IN in_column_new_name VARCHAR(200) -, IN in_column_new_definition VARCHAR(1000) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1054 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'CHANGE COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_new_name); SET @ddl = CONCAT(@ddl, ' ', in_column_new_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - --- Idempotent ADD UNIQUE KEY -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_UNIQUE_KEY`; -CREATE PROCEDURE 
`cloud`.`IDEMPOTENT_ADD_UNIQUE_KEY` ( - IN in_table_name VARCHAR(200) -, IN in_key_name VARCHAR(200) -, IN in_key_definition VARCHAR(1000) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', 'ADD UNIQUE KEY ', in_key_name); SET @ddl = CONCAT(@ddl, ' ', in_key_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; - --- Idempotent DROP FOREIGN KEY -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY`; -CREATE PROCEDURE `cloud`.`IDEMPOTENT_DROP_FOREIGN_KEY` ( - IN in_table_name VARCHAR(200) -, IN in_foreign_key_name VARCHAR(200) -) -BEGIN - DECLARE CONTINUE HANDLER FOR 1091, 1025 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' DROP FOREIGN KEY '); SET @ddl = CONCAT(@ddl, ' ', in_foreign_key_name); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; -- Add column 'supports_vm_autoscaling' to 'network_offerings' table CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.network_offerings', 'supports_vm_autoscaling', 'boolean default false'); @@ -1186,16 +1137,6 @@ CREATE TABLE IF NOT EXISTS `cloud`.`tungsten_lb_health_monitor` ( ) ENGINE=InnoDB DEFAULT CHARSET=utf8; --- #6888 add index to speed up querying IPs in the network-tab -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_KEY`; - -CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_KEY` ( - IN in_index_name VARCHAR(200) - , IN in_table_name VARCHAR(200) - , IN in_key_definition VARCHAR(1000) -) -BEGIN - - DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' ADD KEY ') ; SET @ddl = CONCAT(@ddl, ' ', in_index_name); SET @ddl = CONCAT(@ddl, ' ', in_key_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; CALL `cloud`.`IDEMPOTENT_ADD_KEY`('i_user_ip_address_state','user_ip_address', '(state)'); diff --git 
a/engine/schema/src/main/resources/META-INF/db/schema-41900to41910-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-41900to41910-cleanup.sql index b580d42686f..2d57db2b778 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41900to41910-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41900to41910-cleanup.sql @@ -18,3 +18,7 @@ --; -- Schema upgrade cleanup from 4.19.0.0 to 4.19.1.0 --; + +-- List VMs response optimisation, don't sum during API handling +UPDATE cloud.configuration set value='false' where name='vm.stats.increment.metrics'; +DELETE from cloud.configuration where name='vm.stats.increment.metrics.in.memory'; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41900to41910.sql b/engine/schema/src/main/resources/META-INF/db/schema-41900to41910.sql index bdb23d9844c..0cb10f4a0ef 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41900to41910.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41900to41910.sql @@ -65,3 +65,8 @@ CREATE TABLE IF NOT EXISTS `cloud_usage`.`usage_vpc` ( CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.cloud_usage', 'state', 'VARCHAR(100) DEFAULT NULL'); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.user_data', 'removed', 'datetime COMMENT "date removed or null, if still present"'); + +-- Update options for config - host.allocators.order +UPDATE `cloud`.`configuration` SET + `options` = 'FirstFitRouting,RandomAllocator,TestingAllocator,FirstFitAllocator,RecreateHostAllocator' +WHERE `name` = 'host.allocators.order'; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41910to42000.sql b/engine/schema/src/main/resources/META-INF/db/schema-41910to42000.sql index 1764af7b2ac..2ab86ea7d89 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41910to42000.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41910to42000.sql @@ -23,19 +23,30 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.resource_limit', 'tag', 
'varchar(64) DEFAULT NULL COMMENT "tag for the limit" '); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.resource_count', 'tag', 'varchar(64) DEFAULT NULL COMMENT "tag for the resource count" '); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.resource_reservation', 'tag', 'varchar(64) DEFAULT NULL COMMENT "tag for the resource reservation" '); -ALTER TABLE `resource_count` -DROP INDEX `i_resource_count__type_accountId`, -DROP INDEX `i_resource_count__type_domaintId`, -ADD UNIQUE INDEX `i_resource_count__type_tag_accountId` (`type`,`tag`,`account_id`), -ADD UNIQUE INDEX `i_resource_count__type_tag_domaintId` (`type`,`tag`,`domain_id`); +CALL `cloud`.`IDEMPOTENT_DROP_INDEX`('i_resource_count__type_accountId', 'cloud.resource_count'); +CALL `cloud`.`IDEMPOTENT_DROP_INDEX`('i_resource_count__type_domaintId', 'cloud.resource_count'); +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_ADD_UNIQUE_INDEX`; +CREATE PROCEDURE `cloud`.`IDEMPOTENT_ADD_UNIQUE_INDEX` ( + IN in_table_name VARCHAR(200), + IN in_index_name VARCHAR(200), + IN in_index_definition VARCHAR(1000) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1061 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name, ' ', 'ADD UNIQUE INDEX ', in_index_name, ' ', in_index_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; -ALTER TABLE `cloud`.`resource_reservation` - ADD COLUMN `resource_id` bigint unsigned NULL; +CALL `cloud`.`IDEMPOTENT_ADD_UNIQUE_INDEX`('cloud.resource_count', 'i_resource_count__type_tag_accountId', '(type, tag, account_id)'); +CALL `cloud`.`IDEMPOTENT_ADD_UNIQUE_INDEX`('cloud.resource_count', 'i_resource_count__type_tag_domainId', '(type, tag, domain_id)'); ALTER TABLE `cloud`.`resource_reservation` MODIFY COLUMN `amount` bigint NOT NULL; +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.resource_reservation', 'resource_id', 'bigint unsigned NULL COMMENT "id of the resource" '); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.resource_reservation', 'mgmt_server_id', 'bigint unsigned NULL 
COMMENT "management server id" '); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.resource_reservation', 'created', 'datetime DEFAULT NULL COMMENT "date when the reservation was created" '); + +UPDATE `cloud`.`resource_reservation` SET `created` = now() WHERE created IS NULL; + -- Update Default System offering for Router to 512MiB UPDATE `cloud`.`service_offering` SET ram_size = 512 WHERE unique_name IN ("Cloud.Com-SoftwareRouter", "Cloud.Com-SoftwareRouter-Local", @@ -70,7 +81,6 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.network_offerings','nsx_mode', 'varc CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vpc_offerings','for_nsx', 'int(1) unsigned DEFAULT "0" COMMENT "is nsx enabled for the resource"'); CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vpc_offerings','nsx_mode', 'varchar(32) COMMENT "mode in which the network would route traffic"'); - -- Create table to persist quota email template configurations CREATE TABLE IF NOT EXISTS `cloud_usage`.`quota_email_configuration`( `account_id` int(11) NOT NULL, @@ -80,9 +90,342 @@ CREATE TABLE IF NOT EXISTS `cloud_usage`.`quota_email_configuration`( CONSTRAINT `FK_quota_email_configuration_account_id` FOREIGN KEY (`account_id`) REFERENCES `cloud_usage`.`quota_account`(`account_id`), CONSTRAINT `FK_quota_email_configuration_email_template_id` FOREIGN KEY (`email_template_id`) REFERENCES `cloud_usage`.`quota_email_templates`(`id`)); +-- Remove on delete cascade from snapshot schedule +ALTER TABLE `cloud`.`snapshot_schedule` DROP CONSTRAINT `fk__snapshot_schedule_async_job_id`; + -- Add `is_implicit` column to `host_tags` table CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host_tags', 'is_implicit', 'int(1) UNSIGNED NOT NULL DEFAULT 0 COMMENT "If host tag is implicit or explicit" '); +-- Fields related to Snapshot Extraction +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'download_url', 'varchar(2048) DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.snapshot_store_ref', 'download_url_created', 
'datetime DEFAULT NULL'); + +-- Webhooks feature +DROP TABLE IF EXISTS `cloud`.`webhook`; +CREATE TABLE `cloud`.`webhook` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id of the webhook', + `uuid` varchar(255) COMMENT 'uuid of the webhook', + `name` varchar(255) NOT NULL COMMENT 'name of the webhook', + `description` varchar(4096) COMMENT 'description for the webhook', + `state` char(32) NOT NULL COMMENT 'state of the webhook - Enabled or Disabled', + `domain_id` bigint unsigned NOT NULL COMMENT 'id of the owner domain of the webhook', + `account_id` bigint unsigned NOT NULL COMMENT 'id of the owner account of the webhook', + `payload_url` varchar(255) COMMENT 'payload URL for the webhook', + `secret_key` varchar(255) COMMENT 'secret key for the webhook', + `ssl_verification` boolean COMMENT 'for https payload url, if true then strict ssl verification', + `scope` char(32) NOT NULL COMMENT 'scope for the webhook - Local, Domain, Global', + `created` datetime COMMENT 'date the webhook was created', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY(`id`), + INDEX `i_webhook__account_id`(`account_id`), + CONSTRAINT `fk_webhook__account_id` FOREIGN KEY (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +DROP TABLE IF EXISTS `cloud`.`webhook_delivery`; +CREATE TABLE `cloud`.`webhook_delivery` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id of the webhook delivery', + `uuid` varchar(255) COMMENT 'uuid of the webhook', + `event_id` bigint unsigned NOT NULL COMMENT 'id of the event', + `webhook_id` bigint unsigned NOT NULL COMMENT 'id of the webhook', + `mshost_msid` bigint unsigned NOT NULL COMMENT 'msid of the management server', + `headers` TEXT COMMENT 'headers for the webhook delivery', + `payload` TEXT COMMENT 'payload for the webhook delivery', + `success` boolean COMMENT 'webhook delivery succeeded or not', + `response` TEXT COMMENT 'response of the webhook delivery', + 
`start_time` datetime COMMENT 'start timestamp of the webhook delivery', + `end_time` datetime COMMENT 'end timestamp of the webhook delivery', + PRIMARY KEY(`id`), + INDEX `i_webhook__event_id`(`event_id`), + INDEX `i_webhook__webhook_id`(`webhook_id`), + CONSTRAINT `fk_webhook__event_id` FOREIGN KEY (`event_id`) REFERENCES `event`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_webhook__webhook_id` FOREIGN KEY (`webhook_id`) REFERENCES `webhook`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- Normalize quota.usage.smtp.useStartTLS, quota.usage.smtp.useAuth, alert.smtp.useAuth and project.smtp.useAuth values +UPDATE + `cloud`.`configuration` +SET + value = "true" +WHERE + name IN ("quota.usage.smtp.useStartTLS", "quota.usage.smtp.useAuth", "alert.smtp.useAuth", "project.smtp.useAuth") + AND value IN ("true", "y", "t", "1", "on", "yes"); + +UPDATE + `cloud`.`configuration` +SET + value = "false" +WHERE + name IN ("quota.usage.smtp.useStartTLS", "quota.usage.smtp.useAuth", "alert.smtp.useAuth", "project.smtp.useAuth") + AND value NOT IN ("true", "y", "t", "1", "on", "yes"); + +-- Create tables for static and dynamic routing +CREATE TABLE `cloud`.`dc_ip4_guest_subnets` ( + `id` bigint unsigned NOT NULL auto_increment COMMENT 'id', + `uuid` varchar(40) DEFAULT NULL, + `data_center_id` bigint(20) unsigned NOT NULL COMMENT 'zone it belongs to', + `subnet` varchar(255) NOT NULL COMMENT 'subnet of the ip4 network', + `domain_id` bigint unsigned DEFAULT NULL COMMENT 'domain the subnet belongs to', + `account_id` bigint unsigned DEFAULT NULL COMMENT 'owner of this subnet', + `created` datetime DEFAULT NULL, + `removed` datetime DEFAULT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `fk_dc_ip4_guest_subnets__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`), + CONSTRAINT `fk_dc_ip4_guest_subnets__domain_id` FOREIGN KEY (`domain_id`) REFERENCES `domain`(`id`), + CONSTRAINT `fk_dc_ip4_guest_subnets__account_id` FOREIGN KEY (`account_id`) 
REFERENCES `account`(`id`), + CONSTRAINT `uc_dc_ip4_guest_subnets__uuid` UNIQUE (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`ip4_guest_subnet_network_map` ( + `id` bigint unsigned NOT NULL auto_increment COMMENT 'id', + `uuid` varchar(40) DEFAULT NULL, + `parent_id` bigint(20) unsigned COMMENT 'ip4 guest subnet which subnet belongs to', + `subnet` varchar(255) NOT NULL COMMENT 'subnet of the ip4 network', + `network_id` bigint(20) unsigned DEFAULT NULL COMMENT 'network which subnet is associated to', + `vpc_id` bigint(20) unsigned DEFAULT NULL COMMENT 'VPC which subnet is associated to', + `state` varchar(255) NOT NULL COMMENT 'state of the subnet', + `allocated` datetime DEFAULT NULL, + `created` datetime DEFAULT NULL, + `removed` datetime DEFAULT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `fk_ip4_guest_subnet_network_map__parent_id` FOREIGN KEY (`parent_id`) REFERENCES `dc_ip4_guest_subnets`(`id`), + CONSTRAINT `fk_ip4_guest_subnet_network_map__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `fk_ip4_guest_subnet_network_map__vpc_id` FOREIGN KEY (`vpc_id`) REFERENCES `vpc`(`id`), + CONSTRAINT `uc_ip4_guest_subnet_network_map__uuid` UNIQUE (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CALL `cloud`.`IDEMPOTENT_CHANGE_COLUMN`('network_offerings', 'nsx_mode', 'network_mode', 'varchar(32) COMMENT "mode in which the network would route traffic"'); +CALL `cloud`.`IDEMPOTENT_CHANGE_COLUMN`('vpc_offerings', 'nsx_mode', 'network_mode', 'varchar(32) COMMENT "mode in which the network would route traffic"'); +ALTER TABLE `cloud`.`event` MODIFY COLUMN `type` varchar(50) NOT NULL; + +-- Add tables for AS Numbers and range +CREATE TABLE IF NOT EXISTS `cloud`.`as_number_range` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `uuid` varchar(40) DEFAULT NULL, + `data_center_id` bigint unsigned NOT NULL COMMENT 'zone that it belongs to', + `start_as_number` bigint unsigned NOT NULL COMMENT 'start AS number of the 
range', + `end_as_number` bigint unsigned NOT NULL COMMENT 'end AS number of the range', + `created` datetime DEFAULT NULL COMMENT 'date created', + `removed` datetime DEFAULT NULL COMMENT 'date removed', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_as_number_range__uuid` (`uuid`), + UNIQUE KEY `uk_as_number_range__range` (`data_center_id`,`start_as_number`,`end_as_number`, `removed`), + CONSTRAINT `fk_as_number_range__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `cloud`.`as_number` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `uuid` varchar(40) DEFAULT NULL, + `account_id` bigint unsigned DEFAULT NULL, + `domain_id` bigint unsigned DEFAULT NULL, + `as_number` bigint unsigned NOT NULL COMMENT 'the AS Number', + `as_number_range_id` bigint unsigned NOT NULL, + `data_center_id` bigint unsigned NOT NULL COMMENT 'zone that it belongs to', + `allocated` datetime DEFAULT NULL COMMENT 'Date this AS Number was allocated to some network', + `is_allocated` tinyint(1) NOT NULL DEFAULT 0 COMMENT 'indicates if the AS Number is allocated to some network', + `network_id` bigint unsigned DEFAULT NULL COMMENT 'Network this AS Number is associated with', + `vpc_id` bigint unsigned DEFAULT NULL COMMENT 'VPC this AS Number is associated with', + `created` datetime DEFAULT NULL COMMENT 'date created', + `removed` datetime DEFAULT NULL COMMENT 'date removed', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_as_number__uuid` (`uuid`), + UNIQUE KEY `uk_as_number__number` (`data_center_id`,`as_number`,`as_number_range_id`), + CONSTRAINT `fk_as_number__account_id` FOREIGN KEY (`account_id`) REFERENCES `account` (`id`), + CONSTRAINT `fk_as_number__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_as_number__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks` (`id`), + CONSTRAINT 
`fk_as_number__as_number_range_id` FOREIGN KEY (`as_number_range_id`) REFERENCES `as_number_range` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.network_offerings','routing_mode', 'varchar(10) COMMENT "routing mode for the offering"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.network_offerings','specify_as_number', 'tinyint(1) NOT NULL DEFAULT 0 COMMENT "specify AS number when using dynamic routing"'); + +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vpc_offerings','routing_mode', 'varchar(10) COMMENT "routing mode for the offering"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vpc_offerings','specify_as_number', 'tinyint(1) NOT NULL DEFAULT 0 COMMENT "specify AS number when using dynamic routing"'); + +-- Tables for Dynamic Routing +CREATE TABLE IF NOT EXISTS `cloud`.`bgp_peers` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `uuid` varchar(40) DEFAULT NULL, + `data_center_id` bigint(20) unsigned NOT NULL COMMENT 'zone it belongs to', + `ip4_address` varchar(40) DEFAULT NULL COMMENT 'IPv4 address of the BGP peer', + `ip6_address` varchar(40) DEFAULT NULL COMMENT 'IPv6 address of the BGP peer', + `as_number` bigint unsigned NOT NULL COMMENT 'AS number of the BGP peer', + `password` varchar(255) DEFAULT NULL COMMENT 'Password of the BGP peer', + `domain_id` bigint unsigned DEFAULT NULL COMMENT 'domain the subnet belongs to', + `account_id` bigint unsigned DEFAULT NULL COMMENT 'owner of this subnet', + `created` datetime DEFAULT NULL COMMENT 'date created', + `removed` datetime DEFAULT NULL COMMENT 'date removed', + PRIMARY KEY (`id`), + UNIQUE KEY `uk_bgp_peers__uuid` (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`bgp_peer_details` ( + `id` bigint unsigned NOT NULL auto_increment, + `bgp_peer_id` bigint unsigned NOT NULL COMMENT 'bgp peer id', + `name` varchar(255) NOT NULL, + `value` varchar(1024) NOT NULL, + `display` tinyint(1) NOT NULL DEFAULT '1' COMMENT 'True if the detail can be 
displayed to the end user', + PRIMARY KEY (`id`), + CONSTRAINT `fk_bgp_peer_details__bgp_peer_id` FOREIGN KEY `fk_bgp_peer_details__bgp_peer_id`(`bgp_peer_id`) REFERENCES `bgp_peers`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE IF NOT EXISTS `cloud`.`bgp_peer_network_map` ( + `id` bigint unsigned NOT NULL auto_increment COMMENT 'id', + `bgp_peer_id` bigint(20) unsigned COMMENT 'id of the BGP peer', + `network_id` bigint(20) unsigned DEFAULT NULL COMMENT 'network which BGP peer is associated to', + `vpc_id` bigint(20) unsigned DEFAULT NULL COMMENT 'vpc which BGP peer is associated to', + `state` varchar(40) DEFAULT NULL, + `created` datetime DEFAULT NULL COMMENT 'date created', + `removed` datetime DEFAULT NULL COMMENT 'date removed', + PRIMARY KEY (`id`), + CONSTRAINT `fk_bgp_peer_network_map__bgp_peer_id` FOREIGN KEY (`bgp_peer_id`) REFERENCES `bgp_peers`(`id`), + CONSTRAINT `fk_bgp_peer_network_map__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `fk_bgp_peer_network_map__vpc_id` FOREIGN KEY (`vpc_id`) REFERENCES `vpc`(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`shared_filesystem`( + `id` bigint unsigned NOT NULL auto_increment COMMENT 'ID', + `uuid` varchar(40) COMMENT 'UUID', + `name` varchar(255) NOT NULL COMMENT 'Name of the shared filesystem', + `description` varchar(1024) COMMENT 'Description', + `domain_id` bigint unsigned NOT NULL COMMENT 'Domain ID', + `account_id` bigint unsigned NOT NULL COMMENT 'Account ID', + `data_center_id` bigint unsigned NOT NULL COMMENT 'Data center ID', + `state` varchar(12) NOT NULL COMMENT 'State of the shared filesystem in the FSM', + `fs_provider_name` varchar(255) COMMENT 'Name of the shared filesystem provider', + `protocol` varchar(10) COMMENT 'Protocol supported by the shared filesystem', + `volume_id` bigint unsigned COMMENT 'Volume which the shared filesystem is using as storage', + `vm_id` bigint unsigned COMMENT 'vm on which 
the shared filesystem is hosted', + `fs_type` varchar(10) NOT NULL COMMENT 'The filesystem format to be used for the shared filesystem', + `service_offering_id` bigint unsigned COMMENT 'Service offering for the vm', + `update_count` bigint unsigned COMMENT 'Update count for state change', + `updated` datetime COMMENT 'date updated', + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY (`id`), + CONSTRAINT `uc_shared_filesystem__uuid` UNIQUE (`uuid`), + INDEX `i_shared_filesystem__account_id`(`account_id`), + INDEX `i_shared_filesystem__domain_id`(`domain_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- Quota inject tariff result into subsequent ones +CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.quota_tariff', 'position', 'bigint(20) NOT NULL DEFAULT 1 COMMENT "Position in the execution sequence for tariffs of the same type"'); + +-- Idempotent IDEMPOTENT_MODIFY_COLUMN_CHAR_SET +DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`; +CREATE PROCEDURE `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET` ( + IN in_table_name VARCHAR(200) +, IN in_column_name VARCHAR(200) +, IN in_column_type VARCHAR(200) +, IN in_column_definition VARCHAR(1000) +) +BEGIN + DECLARE CONTINUE HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' MODIFY COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_type); SET @ddl = CONCAT(@ddl, ' ', ' CHARACTER SET utf8mb4'); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; + +DROP PROCEDURE IF EXISTS `cloud_usage`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`; +CREATE PROCEDURE `cloud_usage`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET` ( + IN in_table_name VARCHAR(200) +, IN in_column_name VARCHAR(200) +, IN in_column_type VARCHAR(200) +, IN in_column_definition VARCHAR(1000) +) +BEGIN + DECLARE CONTINUE 
HANDLER FOR 1060 BEGIN END; SET @ddl = CONCAT('ALTER TABLE ', in_table_name); SET @ddl = CONCAT(@ddl, ' ', ' MODIFY COLUMN') ; SET @ddl = CONCAT(@ddl, ' ', in_column_name); SET @ddl = CONCAT(@ddl, ' ', in_column_type); SET @ddl = CONCAT(@ddl, ' ', ' CHARACTER SET utf8mb4'); SET @ddl = CONCAT(@ddl, ' ', in_column_definition); PREPARE stmt FROM @ddl; EXECUTE stmt; DEALLOCATE PREPARE stmt; END; + +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('async_job', 'job_result', 'TEXT', 'COMMENT \'job result info\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('async_job', 'job_cmd_info', 'TEXT', 'COMMENT \'command parameter info\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('event', 'description', 'VARCHAR(1024)', 'NOT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('usage_event', 'resource_name', 'VARCHAR(255)', 'DEFAULT NULL'); +CALL `cloud_usage`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('usage_event', 'resource_name', 'VARCHAR(255)', 'DEFAULT NULL'); + +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('account', 'account_name', 'VARCHAR(100)', 'DEFAULT NULL COMMENT \'an account name set by the creator of the account, defaults to username for single accounts\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('affinity_group', 'description', 'VARCHAR(4096)', 'DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('annotations', 'annotation', 'TEXT', ''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('autoscale_vmgroups', 'name', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'name of the autoscale vm group\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('backup_offering', 'name', 'VARCHAR(255)', 'NOT NULL COMMENT \'backup offering name\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('backup_offering', 'description', 'VARCHAR(255)', 'NOT NULL COMMENT \'backup offering description\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('disk_offering', 'name', 'VARCHAR(255)', 'NOT NULL'); +CALL 
`cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('disk_offering', 'unique_name', 'VARCHAR(32)', 'DEFAULT NULL COMMENT \'unique name\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('disk_offering', 'display_text', 'VARCHAR(4096)', 'DEFAULT NULL COMMENT \'Optional text set by the admin for display purpose only\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('instance_group', 'name', 'VARCHAR(255)', 'NOT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('kubernetes_cluster', 'name', 'VARCHAR(255)', 'NOT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('kubernetes_cluster', 'description', 'VARCHAR(4096)', 'DEFAULT NULL COMMENT \'display text for this Kubernetes cluster\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('kubernetes_supported_version', 'name', 'VARCHAR(255)', 'NOT NULL COMMENT \'the name of this Kubernetes version\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('network_offerings', 'name', 'VARCHAR(64)', 'DEFAULT NULL COMMENT \'name of the network offering\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('network_offerings', 'unique_name', 'VARCHAR(64)', 'DEFAULT NULL COMMENT \'unique name of the network offering\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('network_offerings', 'display_text', 'VARCHAR(255)', 'NOT NULL COMMENT \'text to display to users\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('networks', 'name', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'name for this network\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('networks', 'display_text', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'display text for this network\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('project_role', 'description', 'TEXT', 'COMMENT \'description of the project role\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('projects', 'name', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'project name\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('projects', 'display_text', 'VARCHAR(255)', 
'DEFAULT NULL COMMENT \'project display text\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('roles', 'description', 'TEXT', 'COMMENT \'description of the role\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('service_offering', 'name', 'VARCHAR(255)', 'NOT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('service_offering', 'unique_name', 'VARCHAR(32)', 'DEFAULT NULL COMMENT \'unique name for offerings\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('service_offering', 'display_text', 'VARCHAR(4096)', 'DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('snapshots', 'name', 'VARCHAR(255)', 'NOT NULL COMMENT \'snapshot name\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('ssh_keypairs', 'keypair_name', 'VARCHAR(256)', 'NOT NULL COMMENT \'name of the key pair\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('user_vm', 'display_name', 'VARCHAR(255)', 'DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('user_vm_details', 'value', 'VARCHAR(5120)', 'NOT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('user', 'firstname', 'VARCHAR(255)', 'DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('user', 'lastname', 'VARCHAR(255)', 'DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('user_data', 'name', 'VARCHAR(256)', 'NOT NULL COMMENT \'name of the user data\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vm_instance', 'display_name', 'VARCHAR(255)', 'DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vm_snapshots', 'display_name', 'VARCHAR(255)', 'DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vm_snapshots', 'description', 'VARCHAR(255)', 'DEFAULT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vm_template', 'name', 'VARCHAR(255)', 'NOT NULL'); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vm_template', 'display_text', 'VARCHAR(4096)', 'DEFAULT NULL COMMENT \'Description text set by the admin for display 
purpose only\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('volumes', 'name', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'A user specified name for the volume\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vpc', 'name', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'vpc name\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vpc', 'display_text', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'vpc display text\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vpc_offerings', 'name', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'vpc offering name\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vpc_offerings', 'unique_name', 'VARCHAR(64)', 'DEFAULT NULL COMMENT \'unique name of the vpc offering\''); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('vpc_offerings', 'display_text', 'VARCHAR(255)', 'DEFAULT NULL COMMENT \'display text\''); + +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.roles','state', 'varchar(10) NOT NULL default "enabled" COMMENT "role state"'); + +-- Multi-Arch Zones +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.cluster', 'arch', 'varchar(8) DEFAULT "x86_64" COMMENT "the CPU architecture of the hosts in the cluster"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.host', 'arch', 'varchar(8) DEFAULT "x86_64" COMMENT "the CPU architecture of the host"'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vm_template', 'arch', 'varchar(8) DEFAULT "x86_64" COMMENT "the CPU architecture of the template/ISO"'); + +-- NAS B&R Plugin Backup Repository +DROP TABLE IF EXISTS `cloud`.`backup_repository`; +CREATE TABLE `cloud`.`backup_repository` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id of the backup repository', + `uuid` varchar(255) NOT NULL COMMENT 'uuid of the backup repository', + `name` varchar(255) CHARACTER SET utf8mb4 NOT NULL COMMENT 'name of the backup repository', + `zone_id` bigint unsigned NOT NULL COMMENT 'id of zone', + `provider` varchar(255) NOT NULL COMMENT 'backup provider name', + `type` varchar(255) NOT NULL COMMENT 
'backup repo type', + `address` varchar(1024) NOT NULL COMMENT 'url of the backup repository', + `mount_opts` varchar(1024) NOT NULL COMMENT 'mount options for the backup repository', + `used_bytes` bigint unsigned, + `capacity_bytes` bigint unsigned, + `created` datetime, + `removed` datetime, + PRIMARY KEY(`id`), + INDEX `i_backup_repository__uuid`(`uuid`), + INDEX `i_backup_repository__zone_id_provider`(`zone_id`, `provider`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- Drop foreign key on backup_schedule, drop unique key on vm_id and re-add foreign key to allow multiple backup schedules to be created +ALTER TABLE `cloud`.`backup_schedule` DROP FOREIGN KEY fk_backup_schedule__vm_id; +ALTER TABLE `cloud`.`backup_schedule` DROP INDEX vm_id; +ALTER TABLE `cloud`.`backup_schedule` ADD CONSTRAINT fk_backup_schedule__vm_id FOREIGN KEY (vm_id) REFERENCES vm_instance(id) ON DELETE CASCADE; + +-- Add volume details to the backups table to keep track of the volumes being backed up +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'backed_volumes', 'text DEFAULT NULL COMMENT "details of backed-up volumes" '); +CALL `cloud`.`IDEMPOTENT_MODIFY_COLUMN_CHAR_SET`('backups', 'backed_volumes', 'TEXT', 'DEFAULT NULL COMMENT \'details of backed-up volumes\''); + +-- Add support for VMware 8.0u2 (8.0.2.x) and 8.0u3 (8.0.3.x) +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities` (uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) values (UUID(), 'VMware', '8.0.2', 1024, 0, 59, 64, 1, 1); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '8.0.2', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='8.0'; +INSERT IGNORE INTO 
`cloud`.`hypervisor_capabilities` (uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) values (UUID(), 'VMware', '8.0.3', 1024, 0, 59, 64, 1, 1); +INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid, hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) SELECT UUID(),'VMware', '8.0.3', guest_os_name, guest_os_id, utc_timestamp(), 0 FROM `cloud`.`guest_os_hypervisor` WHERE hypervisor_type='VMware' AND hypervisor_version='8.0'; + +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vm_instance', 'delete_protection', 'boolean DEFAULT FALSE COMMENT "delete protection for vm" '); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.volumes', 'delete_protection', 'boolean DEFAULT FALSE COMMENT "delete protection for volumes" '); + -- Add for_cks column to the vm_template table CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vm_template','for_cks', 'int(1) unsigned DEFAULT "0" COMMENT "if true, the template can be used for CKS cluster deployment"'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-420to421.sql b/engine/schema/src/main/resources/META-INF/db/schema-420to421.sql index b99af287bc5..25c025c5651 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-420to421.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-420to421.sql @@ -20,10 +20,10 @@ --; -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 's3.singleupload.max.size', '5', +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 's3.singleupload.max.size', '5', 'The maximum size limit for S3 single part upload API(in GB). If it is set to 0, then it means always use multi-part upload to upload object to S3. 
If it is set to -1, then it means always use single-part upload to upload object to S3.'); -INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Storage", 'DEFAULT', 'management-server', "enable.ha.storage.migration", "true", "Enable/disable storage migration across primary storage during HA"); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Storage", 'DEFAULT', 'management-server', "enable.ha.storage.migration", "true", "Enable/disable storage migration across primary storage during HA"); UPDATE `cloud`.`configuration` SET description="Specify whether or not to reserve CPU based on CPU overprovisioning factor" where name="vmware.reserve.cpu"; UPDATE `cloud`.`configuration` SET description="Specify whether or not to reserve memory based on memory overprovisioning factor" where name="vmware.reserve.mem"; -- Remove Windows Server 8 from guest_os_type dropdown to use Windows Server 2012 diff --git a/engine/schema/src/main/resources/META-INF/db/schema-442to450.sql b/engine/schema/src/main/resources/META-INF/db/schema-442to450.sql index 90a52bd4273..d2ba408241e 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-442to450.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-442to450.sql @@ -671,7 +671,7 @@ CREATE VIEW `cloud`.`user_vm_view` AS resource_tags.resource_id tag_resource_id, resource_tags.resource_uuid tag_resource_uuid, resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer, + resource_tags.customer tag_customer, async_job.id job_id, async_job.uuid job_uuid, async_job.job_status job_status, @@ -752,7 +752,7 @@ CREATE VIEW `cloud`.`user_vm_view` AS left join `cloud`.`user_vm_details` `custom_speed` ON (((`custom_speed`.`vm_id` = `cloud`.`vm_instance`.`id`) and (`custom_speed`.`name` = 'CpuSpeed'))) left join - `cloud`.`user_vm_details` `custom_ram_size` ON (((`custom_ram_size`.`vm_id` = `cloud`.`vm_instance`.`id`) and (`custom_ram_size`.`name` = 'memory'))); + `cloud`.`user_vm_details` `custom_ram_size` 
ON (((`custom_ram_size`.`vm_id` = `cloud`.`vm_instance`.`id`) and (`custom_ram_size`.`name` = 'memory'))); INSERT IGNORE INTO `cloud`.`guest_os` (id, uuid, category_id, display_name, created) VALUES (231, UUID(), 1, 'CentOS 5 (32-bit)', utc_timestamp()); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-481to490-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-481to490-cleanup.sql index 1868a090800..b8dd0477db9 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-481to490-cleanup.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-481to490-cleanup.sql @@ -22,7 +22,7 @@ -- Added in CLOUDSTACK-9340: General DB optimization, 4 cases: ----- 1) Incorrect PRIMARY key -ALTER TABLE `cloud`.`ovs_tunnel_network` +ALTER TABLE `cloud`.`ovs_tunnel_network` DROP PRIMARY KEY, ADD PRIMARY KEY (`id`), DROP INDEX `id` , diff --git a/engine/schema/src/main/resources/META-INF/db/schema-481to490.sql b/engine/schema/src/main/resources/META-INF/db/schema-481to490.sql index 49cfc8346c5..bac3b1e6fab 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-481to490.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-481to490.sql @@ -23,9 +23,9 @@ ALTER TABLE `event` ADD INDEX `archived` (`archived`); ALTER TABLE `event` ADD INDEX `state` (`state`); DROP VIEW IF EXISTS `cloud`.`template_view`; -CREATE +CREATE VIEW `template_view` AS - SELECT + SELECT `vm_template`.`id` AS `id`, `vm_template`.`uuid` AS `uuid`, `vm_template`.`unique_name` AS `unique_name`, @@ -124,9 +124,9 @@ VIEW `template_view` AS OR (`resource_tags`.`resource_type` = 'ISO'))))); DROP VIEW IF EXISTS `cloud`.`volume_view`; -CREATE +CREATE VIEW `volume_view` AS - SELECT + SELECT `volumes`.`id` AS `id`, `volumes`.`uuid` AS `uuid`, `volumes`.`name` AS `name`, @@ -234,9 +234,9 @@ VIEW `volume_view` AS AND (`async_job`.`job_status` = 0)))); DROP VIEW IF EXISTS `cloud`.`user_vm_view`; -CREATE +CREATE VIEW `user_vm_view` AS - SELECT + SELECT `vm_instance`.`id` 
AS `id`, `vm_instance`.`name` AS `name`, `user_vm`.`display_name` AS `display_name`, @@ -423,10 +423,10 @@ ALTER TABLE `cloud`.`ssh_keypairs` ADD INDEX `i_public_key` (`public_key` (64) A ALTER TABLE `cloud`.`user_vm_details` ADD INDEX `i_name_vm_id` (`vm_id` ASC, `name` ASC); ALTER TABLE `cloud`.`instance_group` ADD INDEX `i_name` (`name` ASC); ------ 4) Some views query (Change view to improve account retrieval speed) +----- 4) Some views query (Change view to improve account retrieval speed) CREATE OR REPLACE VIEW `account_vmstats_view` AS - SELECT + SELECT `vm_instance`.`account_id` AS `account_id`, `vm_instance`.`state` AS `state`, COUNT(0) AS `vmcount` diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4910to4920.sql b/engine/schema/src/main/resources/META-INF/db/schema-4910to4920.sql index a910a8b7799..1aa63020124 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-4910to4920.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-4910to4920.sql @@ -19,42 +19,6 @@ -- Schema upgrade from 4.9.1.0 to 4.9.2.0; --; ---; --- Stored procedure to do idempotent insert; ---; - -DROP PROCEDURE IF EXISTS `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`; - -CREATE PROCEDURE `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`( - IN in_hypervisor_type VARCHAR(32), - IN in_hypervisor_version VARCHAR(32), - IN in_guest_os_name VARCHAR(255), - IN in_guest_os_id BIGINT(20) UNSIGNED, - IN is_user_defined int(1) UNSIGNED) -BEGIN - IF NOT EXISTS ((SELECT * FROM `cloud`.`guest_os_hypervisor` WHERE - hypervisor_type=in_hypervisor_type AND - hypervisor_version=in_hypervisor_version AND - guest_os_id = in_guest_os_id)) - THEN - INSERT INTO `cloud`.`guest_os_hypervisor` ( - uuid, - hypervisor_type, - hypervisor_version, - guest_os_name, - guest_os_id, - created, - is_user_defined) - VALUES ( - UUID(), - in_hypervisor_type, - in_hypervisor_version, - in_guest_os_name, - in_guest_os_id, - utc_timestamp(), - is_user_defined - ); END IF; END;; - 
CALL `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`('Xenserver', '7.0.0', 'CentOS 4.5 (32-bit)', 1, 0); CALL `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`('Xenserver', '7.0.0', 'CentOS 4.6 (32-bit)', 2, 0); CALL `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`('Xenserver', '7.0.0', 'CentOS 4.7 (32-bit)', 3, 0); @@ -234,5 +198,3 @@ CALL `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`('Xenserver', '7.0.0' CALL `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`('Xenserver', '7.0.0', 'Ubuntu Trusty Tahr 14.04', 255, 0); CALL `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING`('Xenserver', '7.0.0', 'Ubuntu Trusty Tahr 14.04', 256, 0); - -DROP PROCEDURE `cloud`.`IDEMPOTENT_INSERT_GUESTOS_HYPERVISOR_MAPPING` diff --git a/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql b/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql index dc0cd6d4d75..23670757247 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-4930to41000.sql @@ -147,7 +147,7 @@ CREATE TABLE IF NOT EXISTS `cloud`.`storage_pool_tags` ( ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- Insert storage tags from storage_pool_details -INSERT INTO `cloud`.`storage_pool_tags` (pool_id, tag) SELECT pool_id, +INSERT INTO `cloud`.`storage_pool_tags` (pool_id, tag) SELECT pool_id, name FROM `cloud`.`storage_pool_details` WHERE value = 'true'; -- Alter view storage_pool_view @@ -227,7 +227,7 @@ ALTER TABLE `cloud`.`vm_snapshots` ADD CONSTRAINT `fk_vm_snapshots_service_offer INSERT INTO `cloud`.`vm_snapshot_details` (vm_snapshot_id, name, value) SELECT s.id, d.name, d.value FROM `cloud`.`user_vm_details` d JOIN `cloud`.`vm_instance` v ON (d.vm_id = v.id) -JOIN `cloud`.`service_offering` o ON (v.service_offering_id = o.id) +JOIN `cloud`.`service_offering` o ON (v.service_offering_id = o.id) JOIN `cloud`.`vm_snapshots` s ON (s.service_offering_id = o.id AND 
s.vm_id = v.id) WHERE (o.cpu is null AND o.speed IS NULL AND o.ram_size IS NULL) AND (d.name = 'cpuNumber' OR d.name = 'cpuSpeed' OR d.name = 'memory'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-level.sql b/engine/schema/src/main/resources/META-INF/db/schema-level.sql index 72aade4e501..fef961502fa 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-level.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-level.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-snapshot-217to224.sql b/engine/schema/src/main/resources/META-INF/db/schema-snapshot-217to224.sql index 7320bda5906..5e29435855d 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-snapshot-217to224.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-snapshot-217to224.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. 
You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/schema-snapshot-223to224.sql b/engine/schema/src/main/resources/META-INF/db/schema-snapshot-223to224.sql index 668cbb692b9..5c27eed68f0 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-snapshot-223to224.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-snapshot-223to224.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql index 7bd4b3cc4a9..6fc8fb80386 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.host_view.sql @@ -41,6 +41,7 @@ SELECT host.cpus, host.speed, host.ram, + host.arch, cluster.id cluster_id, cluster.uuid cluster_uuid, cluster.name cluster_name, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql index bae73deda32..b6abaabcd48 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql @@ -61,8 +61,10 @@ 
SELECT `network_offerings`.`for_vpc` AS `for_vpc`, `network_offerings`.`for_tungsten` AS `for_tungsten`, `network_offerings`.`for_nsx` AS `for_nsx`, - `network_offerings`.`nsx_mode` AS `nsx_mode`, + `network_offerings`.`network_mode` AS `network_mode`, `network_offerings`.`service_package_id` AS `service_package_id`, + `network_offerings`.`routing_mode` AS `routing_mode`, + `network_offerings`.`specify_as_number` AS `specify_as_number`, GROUP_CONCAT(DISTINCT(domain.id)) AS domain_id, GROUP_CONCAT(DISTINCT(domain.uuid)) AS domain_uuid, GROUP_CONCAT(DISTINCT(domain.name)) AS domain_name, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.shared_filesystem_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.shared_filesystem_view.sql new file mode 100644 index 00000000000..1f72babd1ce --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.shared_filesystem_view.sql @@ -0,0 +1,83 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +-- VIEW `cloud`.`shared_filesystem_view`; + +DROP VIEW IF EXISTS `cloud`.`shared_filesystem_view`; + +CREATE VIEW `cloud`.`shared_filesystem_view` AS +SELECT + `shared_filesystem`.`id` AS `id`, + `shared_filesystem`.`uuid` AS `uuid`, + `shared_filesystem`.`name` AS `name`, + `shared_filesystem`.`description` AS `description`, + `shared_filesystem`.`state` AS `state`, + `shared_filesystem`.`fs_provider_name` AS `provider`, + `shared_filesystem`.`fs_type` AS `fs_type`, + `shared_filesystem`.`volume_id` AS `volume_id`, + `shared_filesystem`.`account_id` AS `account_id`, + `shared_filesystem`.`data_center_id` AS `zone_id`, + `zone`.`uuid` AS `zone_uuid`, + `zone`.`name` AS `zone_name`, + `instance`.`id` AS `instance_id`, + `instance`.`uuid` AS `instance_uuid`, + `instance`.`name` AS `instance_name`, + `instance`.`state` AS `instance_state`, + `volumes`.`size` AS `size`, + `volumes`.`uuid` AS `volume_uuid`, + `volumes`.`name` AS `volume_name`, + `volumes`.`provisioning_type` AS `provisioning_type`, + `volumes`.`format` AS `volume_format`, + `volumes`.`path` AS `volume_path`, + `volumes`.`chain_info` AS `volume_chain_info`, + `storage_pool`.`uuid` AS `pool_uuid`, + `storage_pool`.`name` AS `pool_name`, + `account`.`account_name` AS `account_name`, + `project`.`uuid` AS `project_uuid`, + `project`.`name` AS `project_name`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `service_offering`.`uuid` AS `service_offering_uuid`, + `service_offering`.`name` AS `service_offering_name`, + `disk_offering`.`uuid` AS `disk_offering_uuid`, + `disk_offering`.`name` AS `disk_offering_name`, + `disk_offering`.`display_text` AS `disk_offering_display_text`, + `disk_offering`.`disk_size` AS `disk_offering_size`, + `disk_offering`.`customized` AS `disk_offering_custom` +FROM + `cloud`.`shared_filesystem` + LEFT JOIN + `cloud`.`data_center` AS `zone` ON `shared_filesystem`.`data_center_id` = `zone`.`id` + LEFT JOIN + 
`cloud`.`vm_instance` AS `instance` ON `shared_filesystem`.`vm_id` = `instance`.`id` + LEFT JOIN + `cloud`.`volumes` AS `volumes` ON `shared_filesystem`.`volume_id` = `volumes`.`id` + LEFT JOIN + `cloud`.`storage_pool` AS `storage_pool` ON `volumes`.`pool_id` = `storage_pool`.`id` + LEFT JOIN + `cloud`.`account` AS `account` ON `shared_filesystem`.`account_id` = `account`.`id` + LEFT JOIN + `cloud`.`projects` AS `project` ON `project`.`project_account_id` = `account`.`id` + LEFT JOIN + `cloud`.`domain` AS `domain` ON `shared_filesystem`.`domain_id` = `domain`.`id` + LEFT JOIN + `cloud`.`service_offering` AS `service_offering` ON `shared_filesystem`.`service_offering_id` = `service_offering`.`id` + LEFT JOIN + `cloud`.`disk_offering` AS `disk_offering` ON `volumes`.`disk_offering_id` = `disk_offering`.`id` +GROUP BY + `shared_filesystem`.`id`; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.snapshot_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.snapshot_view.sql index c6b8d6b4d05..d0eddc1fc4b 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.snapshot_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.snapshot_view.sql @@ -48,6 +48,7 @@ SELECT `volumes`.`uuid` AS `volume_uuid`, `volumes`.`name` AS `volume_name`, `volumes`.`volume_type` AS `volume_type`, + `volumes`.`state` AS `volume_state`, `volumes`.`size` AS `volume_size`, `data_center`.`id` AS `data_center_id`, `data_center`.`uuid` AS `data_center_uuid`, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.template_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.template_view.sql index 15789d6b9dc..93aa72ad066 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.template_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.template_view.sql @@ -50,6 +50,7 @@ SELECT `vm_template`.`sort_key` AS `sort_key`, `vm_template`.`removed` AS `removed`, 
`vm_template`.`enable_sshkey` AS `enable_sshkey`, + `vm_template`.`arch` AS `arch`, `parent_template`.`id` AS `parent_template_id`, `parent_template`.`uuid` AS `parent_template_uuid`, `source_template`.`id` AS `source_template_id`, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.user_vm_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.user_vm_view.sql index 25f95709721..97cb7b735cf 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.user_vm_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.user_vm_view.sql @@ -25,6 +25,7 @@ SELECT `vm_instance`.`name` AS `name`, `user_vm`.`display_name` AS `display_name`, `user_vm`.`user_data` AS `user_data`, + `user_vm`.`user_vm_type` AS `user_vm_type`, `account`.`id` AS `account_id`, `account`.`uuid` AS `account_uuid`, `account`.`account_name` AS `account_name`, @@ -53,6 +54,7 @@ SELECT `vm_instance`.`instance_name` AS `instance_name`, `vm_instance`.`guest_os_id` AS `guest_os_id`, `vm_instance`.`display_vm` AS `display_vm`, + `vm_instance`.`delete_protection` AS `delete_protection`, `guest_os`.`uuid` AS `guest_os_uuid`, `vm_instance`.`pod_id` AS `pod_id`, `host_pod_ref`.`uuid` AS `pod_uuid`, @@ -196,7 +198,7 @@ FROM LEFT JOIN `networks` ON ((`nics`.`network_id` = `networks`.`id`))) LEFT JOIN `vpc` ON (((`networks`.`vpc_id` = `vpc`.`id`) AND ISNULL(`vpc`.`removed`)))) - LEFT JOIN `user_ip_address` ON ((`user_ip_address`.`vm_id` = `vm_instance`.`id`))) + LEFT JOIN `user_ip_address` FORCE INDEX(`fk_user_ip_address__vm_id`) ON ((`user_ip_address`.`vm_id` = `vm_instance`.`id`))) LEFT JOIN `user_vm_details` `ssh_details` ON (((`ssh_details`.`vm_id` = `vm_instance`.`id`) AND (`ssh_details`.`name` = 'SSH.KeyPairNames')))) LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_instance`.`id`) diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.volume_view.sql 
b/engine/schema/src/main/resources/META-INF/db/views/cloud.volume_view.sql index fd21fff1494..ffeb93e8fa7 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.volume_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.volume_view.sql @@ -39,6 +39,8 @@ SELECT `volumes`.`path` AS `path`, `volumes`.`chain_info` AS `chain_info`, `volumes`.`external_uuid` AS `external_uuid`, + `volumes`.`encrypt_format` AS `encrypt_format`, + `volumes`.`delete_protection` AS `delete_protection`, `account`.`id` AS `account_id`, `account`.`uuid` AS `account_uuid`, `account`.`account_name` AS `account_name`, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.vpc_offering_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.vpc_offering_view.sql index 9aca869b510..c74d50590de 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.vpc_offering_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.vpc_offering_view.sql @@ -29,7 +29,7 @@ select `vpc_offerings`.`state` AS `state`, `vpc_offerings`.`default` AS `default`, `vpc_offerings`.`for_nsx` AS `for_nsx`, - `vpc_offerings`.`nsx_mode` AS `nsx_mode`, + `vpc_offerings`.`network_mode` AS `network_mode`, `vpc_offerings`.`created` AS `created`, `vpc_offerings`.`removed` AS `removed`, `vpc_offerings`.`service_offering_id` AS `service_offering_id`, @@ -37,6 +37,8 @@ select `vpc_offerings`.`supports_region_level_vpc` AS `supports_region_level_vpc`, `vpc_offerings`.`redundant_router_service` AS `redundant_router_service`, `vpc_offerings`.`sort_key` AS `sort_key`, + `vpc_offerings`.`routing_mode` AS `routing_mode`, + `vpc_offerings`.`specify_as_number` AS `specify_as_number`, group_concat(distinct `domain`.`id` separator ',') AS `domain_id`, group_concat(distinct `domain`.`uuid` separator ',') AS `domain_uuid`, group_concat(distinct `domain`.`name` separator ',') AS `domain_name`, diff --git 
a/engine/schema/src/main/resources/META-INF/db/views/cloud.webhook_delivery_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.webhook_delivery_view.sql new file mode 100644 index 00000000000..54ba52fba4a --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.webhook_delivery_view.sql @@ -0,0 +1,48 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +-- VIEW `cloud`.`webhook_delivery_view`; + +DROP VIEW IF EXISTS `cloud`.`webhook_delivery_view`; +CREATE VIEW `cloud`.`webhook_delivery_view` AS + SELECT + webhook_delivery.id, + webhook_delivery.uuid, + webhook_delivery.headers, + webhook_delivery.payload, + webhook_delivery.success, + webhook_delivery.response, + webhook_delivery.start_time, + webhook_delivery.end_time, + event.id event_id, + event.uuid event_uuid, + event.type event_type, + webhook.id webhook_id, + webhook.uuid webhook_uuid, + webhook.name webhook_name, + mshost.id mshost_id, + mshost.uuid mshost_uuid, + mshost.msid mshost_msid, + mshost.name mshost_name + FROM + `cloud`.`webhook_delivery` + INNER JOIN + `cloud`.`event` ON webhook_delivery.event_id = event.id + INNER JOIN + `cloud`.`webhook` ON webhook_delivery.webhook_id = webhook.id + LEFT JOIN + `cloud`.`mshost` ON mshost.msid = webhook_delivery.mshost_msid; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.webhook_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.webhook_view.sql new file mode 100644 index 00000000000..443463eec4b --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.webhook_view.sql @@ -0,0 +1,52 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. 
See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- VIEW `cloud`.`webhook_view`; + +DROP VIEW IF EXISTS `cloud`.`webhook_view`; +CREATE VIEW `cloud`.`webhook_view` AS + SELECT + webhook.id, + webhook.uuid, + webhook.name, + webhook.description, + webhook.state, + webhook.payload_url, + webhook.secret_key, + webhook.ssl_verification, + webhook.scope, + webhook.created, + webhook.removed, + account.id account_id, + account.uuid account_uuid, + account.account_name account_name, + account.type account_type, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path, + projects.id project_id, + projects.uuid project_uuid, + projects.name project_name + FROM + `cloud`.`webhook` + INNER JOIN + `cloud`.`account` ON webhook.account_id = account.id + INNER JOIN + `cloud`.`domain` ON webhook.domain_id = domain.id + LEFT JOIN + `cloud`.`projects` ON projects.project_account_id = webhook.account_id; diff --git a/engine/schema/src/test/java/com/cloud/host/HostVOTest.java b/engine/schema/src/test/java/com/cloud/host/HostVOTest.java index cd9ac3cc172..3262c4cc291 100755 --- a/engine/schema/src/test/java/com/cloud/host/HostVOTest.java +++ b/engine/schema/src/test/java/com/cloud/host/HostVOTest.java @@ -20,14 +20,18 @@ import com.cloud.offering.ServiceOffering; import com.cloud.service.ServiceOfferingVO; import com.cloud.template.VirtualMachineTemplate; import com.cloud.vm.VirtualMachine; -import java.util.Arrays; -import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import org.junit.Test; -import org.junit.Before; -import org.mockito.Mockito; public class HostVOTest { HostVO host; @@ -37,7 
+41,7 @@ public class HostVOTest { public void setUp() throws Exception { host = new HostVO(); offering = new ServiceOfferingVO("TestSO", 0, 0, 0, 0, 0, - false, "TestSO", false,VirtualMachine.Type.User,false); + false, "TestSO", false, VirtualMachine.Type.User, false); } @Test @@ -52,14 +56,14 @@ public class HostVOTest { @Test public void testRightTag() { - host.setHostTags(Arrays.asList("tag1","tag2"), false); + host.setHostTags(Arrays.asList("tag1", "tag2"), false); offering.setHostTag("tag2,tag1"); assertTrue(host.checkHostServiceOfferingTags(offering)); } @Test public void testWrongTag() { - host.setHostTags(Arrays.asList("tag1","tag2"), false); + host.setHostTags(Arrays.asList("tag1", "tag2"), false); offering.setHostTag("tag2,tag4"); assertFalse(host.checkHostServiceOfferingTags(offering)); } @@ -87,40 +91,59 @@ public class HostVOTest { @Test public void testEitherNoSOOrTemplate() { - assertFalse(host.checkHostServiceOfferingAndTemplateTags(null, Mockito.mock(VirtualMachineTemplate.class))); - assertFalse(host.checkHostServiceOfferingAndTemplateTags(Mockito.mock(ServiceOffering.class), null)); + assertFalse(host.checkHostServiceOfferingAndTemplateTags(null, Mockito.mock(VirtualMachineTemplate.class), null)); + assertFalse(host.checkHostServiceOfferingAndTemplateTags(Mockito.mock(ServiceOffering.class), null, null)); } @Test public void testNoTagOfferingTemplate() { - assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, Mockito.mock(VirtualMachineTemplate.class))); + assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, Mockito.mock(VirtualMachineTemplate.class), Collections.emptySet())); + assertTrue(host.getHostServiceOfferingAndTemplateMissingTags(offering, Mockito.mock(VirtualMachineTemplate.class), Collections.emptySet()).isEmpty()); + assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, Mockito.mock(VirtualMachineTemplate.class), Set.of("tag1", "tag2"))); + 
assertTrue(host.getHostServiceOfferingAndTemplateMissingTags(offering, Mockito.mock(VirtualMachineTemplate.class), Set.of("tag1", "tag2")).isEmpty()); } @Test public void testRightTagOfferingTemplate() { host.setHostTags(Arrays.asList("tag1", "tag2"), false); offering.setHostTag("tag2,tag1"); - assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, Mockito.mock(VirtualMachineTemplate.class))); + assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, Mockito.mock(VirtualMachineTemplate.class), Set.of("tag1"))); + Set actualMissingTags = host.getHostServiceOfferingAndTemplateMissingTags(offering, Mockito.mock(VirtualMachineTemplate.class), Set.of("tag1")); + assertTrue(actualMissingTags.isEmpty()); + host.setHostTags(Arrays.asList("tag1", "tag2", "tag3"), false); offering.setHostTag("tag2,tag1"); VirtualMachineTemplate template = Mockito.mock(VirtualMachineTemplate.class); Mockito.when(template.getTemplateTag()).thenReturn("tag3"); - assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, template)); + assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, template, Set.of("tag2", "tag3"))); + actualMissingTags = host.getHostServiceOfferingAndTemplateMissingTags(offering, template, Set.of("tag2", "tag3")); + assertTrue(actualMissingTags.isEmpty()); host.setHostTags(List.of("tag3"), false); offering.setHostTag(null); - assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, template)); + assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, template, Set.of("tag3"))); + actualMissingTags = host.getHostServiceOfferingAndTemplateMissingTags(offering, template, Set.of("tag3")); + assertTrue(actualMissingTags.isEmpty()); + + assertTrue(host.checkHostServiceOfferingAndTemplateTags(offering, template, Set.of("tag2", "tag1"))); + actualMissingTags = host.getHostServiceOfferingAndTemplateMissingTags(offering, template, Set.of("tag2", "tag1")); + assertTrue(actualMissingTags.isEmpty()); } @Test public void 
testWrongOfferingTag() { - host.setHostTags(Arrays.asList("tag1","tag2"), false); + host.setHostTags(Arrays.asList("tag1", "tag2"), false); offering.setHostTag("tag2,tag4"); VirtualMachineTemplate template = Mockito.mock(VirtualMachineTemplate.class); Mockito.when(template.getTemplateTag()).thenReturn("tag1"); - assertFalse(host.checkHostServiceOfferingAndTemplateTags(offering, template)); + assertFalse(host.checkHostServiceOfferingAndTemplateTags(offering, template, Set.of("tag1", "tag2", "tag3", "tag4"))); + Set actualMissingTags = host.getHostServiceOfferingAndTemplateMissingTags(offering, template, Set.of("tag1", "tag2", "tag3", "tag4")); + assertEquals(Set.of("tag4"), actualMissingTags); + offering.setHostTag("tag1,tag2"); template = Mockito.mock(VirtualMachineTemplate.class); Mockito.when(template.getTemplateTag()).thenReturn("tag3"); - assertFalse(host.checkHostServiceOfferingAndTemplateTags(offering, template)); + actualMissingTags = host.getHostServiceOfferingAndTemplateMissingTags(offering, template, Set.of("tag1", "tag2", "tag3", "tag4")); + assertFalse(host.checkHostServiceOfferingAndTemplateTags(offering, template, Set.of("tag1", "tag2", "tag3", "tag4"))); + assertEquals(Set.of("tag3"), actualMissingTags); } } diff --git a/engine/schema/src/test/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImplTest.java b/engine/schema/src/test/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImplTest.java index e13ad42ec80..6de8960ae74 100644 --- a/engine/schema/src/test/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/network/as/dao/AutoScaleVmGroupVmMapDaoImplTest.java @@ -19,11 +19,9 @@ package com.cloud.network.as.dao; -import com.cloud.network.as.AutoScaleVmGroupVmMapVO; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.vm.VirtualMachine; +import java.util.ArrayList; +import 
java.util.Arrays; +import java.util.List; import org.junit.Assert; import org.junit.Before; @@ -33,9 +31,13 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; -import java.util.Arrays; -import java.util.List; +import com.cloud.network.as.AutoScaleVmGroupVmMapVO; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.VirtualMachine; @RunWith(MockitoJUnitRunner.class) public class AutoScaleVmGroupVmMapDaoImplTest { @@ -198,4 +200,33 @@ public class AutoScaleVmGroupVmMapDaoImplTest { Mockito.verify(searchCriteriaAutoScaleVmGroupVmMapVOMock).setParameters("vmGroupId", groupId); Mockito.verify(AutoScaleVmGroupVmMapDaoImplSpy).remove(searchCriteriaAutoScaleVmGroupVmMapVOMock); } + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, AutoScaleVmGroupVmMapDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, AutoScaleVmGroupVmMapDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(AutoScaleVmGroupVmMapDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(AutoScaleVmGroupVmMapDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final AutoScaleVmGroupVmMapVO mockedVO = Mockito.mock(AutoScaleVmGroupVmMapVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), AutoScaleVmGroupVmMapDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(AutoScaleVmGroupVmMapDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } } diff --git a/engine/schema/src/test/java/com/cloud/network/dao/IPAddressDaoImplTest.java b/engine/schema/src/test/java/com/cloud/network/dao/IPAddressDaoImplTest.java new file mode 100644 index 00000000000..d8f6a08d8d3 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/network/dao/IPAddressDaoImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class IPAddressDaoImplTest { + + @Spy + IPAddressDaoImpl ipAddressDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, ipAddressDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, ipAddressDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(ipAddressDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(ipAddressDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final IPAddressVO mockedVO = Mockito.mock(IPAddressVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), ipAddressDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(ipAddressDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImplTest.java b/engine/schema/src/test/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImplTest.java new file mode 100644 index 00000000000..8e06c7618f6 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/network/dao/InlineLoadBalancerNicMapDaoImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class InlineLoadBalancerNicMapDaoImplTest { + + @Spy + InlineLoadBalancerNicMapDaoImpl inlineLoadBalancerNicMapDaoImplSpy; + + @Test + public void testExpungeByNicListNoVms() { + Assert.assertEquals(0, inlineLoadBalancerNicMapDaoImplSpy.expungeByNicList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, inlineLoadBalancerNicMapDaoImplSpy.expungeByNicList( + null, 100L)); + } + + @Test + public void testExpungeByNicList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(inlineLoadBalancerNicMapDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(inlineLoadBalancerNicMapDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final InlineLoadBalancerNicMapVO mockedVO = Mockito.mock(InlineLoadBalancerNicMapVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), inlineLoadBalancerNicMapDaoImplSpy.expungeByNicList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("nicIds", array); + Mockito.verify(inlineLoadBalancerNicMapDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/network/dao/LoadBalancerVMMapDaoImplTest.java b/engine/schema/src/test/java/com/cloud/network/dao/LoadBalancerVMMapDaoImplTest.java new file mode 100644 index 00000000000..fa957194903 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/network/dao/LoadBalancerVMMapDaoImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class LoadBalancerVMMapDaoImplTest { + + @Spy + LoadBalancerVMMapDaoImpl loadBalancerVMMapDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, loadBalancerVMMapDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, loadBalancerVMMapDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(loadBalancerVMMapDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(loadBalancerVMMapDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final LoadBalancerVMMapVO mockedVO = Mockito.mock(LoadBalancerVMMapVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), loadBalancerVMMapDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(loadBalancerVMMapDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImplTest.java b/engine/schema/src/test/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImplTest.java new file mode 100644 index 00000000000..7d0b1b069ba --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/network/dao/OpRouterMonitorServiceDaoImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class OpRouterMonitorServiceDaoImplTest { + + @Spy + OpRouterMonitorServiceDaoImpl opRouterMonitorServiceDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, opRouterMonitorServiceDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, opRouterMonitorServiceDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(opRouterMonitorServiceDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(opRouterMonitorServiceDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final OpRouterMonitorServiceVO mockedVO = Mockito.mock(OpRouterMonitorServiceVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), opRouterMonitorServiceDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(opRouterMonitorServiceDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImplTest.java b/engine/schema/src/test/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImplTest.java new file mode 100644 index 00000000000..c60e9b1f1bf --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/network/rules/dao/PortForwardingRulesDaoImplTest.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.rules.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.network.rules.PortForwardingRuleVO; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class PortForwardingRulesDaoImplTest { + + @Spy + PortForwardingRulesDaoImpl portForwardingRulesDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, portForwardingRulesDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, portForwardingRulesDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(portForwardingRulesDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(portForwardingRulesDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final PortForwardingRuleVO mockedVO = Mockito.mock(PortForwardingRuleVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), portForwardingRulesDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(portForwardingRulesDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/secstorage/CommandExecLogDaoImplTest.java b/engine/schema/src/test/java/com/cloud/secstorage/CommandExecLogDaoImplTest.java new file mode 100644 index 00000000000..f86df6bdd36 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/secstorage/CommandExecLogDaoImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.secstorage; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class CommandExecLogDaoImplTest { + + @Spy + CommandExecLogDaoImpl commandExecLogDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, commandExecLogDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, commandExecLogDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(commandExecLogDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(commandExecLogDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final CommandExecLogVO mockedVO = Mockito.mock(CommandExecLogVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), commandExecLogDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(commandExecLogDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/storage/dao/VolumeDaoImplTest.java b/engine/schema/src/test/java/com/cloud/storage/dao/VolumeDaoImplTest.java index 7968ee4a375..9445efeb089 100644 --- a/engine/schema/src/test/java/com/cloud/storage/dao/VolumeDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/storage/dao/VolumeDaoImplTest.java @@ -26,16 +26,25 @@ import static org.mockito.Mockito.when; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import org.apache.commons.collections.CollectionUtils; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.MockedStatic; import org.mockito.Mockito; +import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; +import com.cloud.storage.VolumeVO; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; @RunWith(MockitoJUnitRunner.class) @@ -48,6 +57,7 @@ public class VolumeDaoImplTest { private static MockedStatic mockedTransactionLegacy; + @Spy private final VolumeDaoImpl volumeDao = new 
VolumeDaoImpl(); @BeforeClass @@ -102,4 +112,34 @@ public class VolumeDaoImplTest { verify(preparedStatementMock, times(2)).setLong(anyInt(), anyLong()); verify(preparedStatementMock, times(1)).executeQuery(); } + + @Test + public void testSearchRemovedByVmsNoVms() { + Assert.assertTrue(CollectionUtils.isEmpty(volumeDao.searchRemovedByVms( + new ArrayList<>(), 100L))); + Assert.assertTrue(CollectionUtils.isEmpty(volumeDao.searchRemovedByVms( + null, 100L))); + } + + @Test + public void testSearchRemovedByVms() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(new ArrayList<>()).when(volumeDao).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.any(Filter.class), Mockito.eq(null), + Mockito.eq(false)); + Mockito.when(volumeDao.createSearchBuilder()).thenReturn(sb); + final VolumeVO mockedVO = Mockito.mock(VolumeVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + volumeDao.searchRemovedByVms(List.of(1L, 2L), batchSize); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(volumeDao, Mockito.times(1)).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.any(Filter.class), Mockito.eq(null), + Mockito.eq(false)); + } + } diff --git a/engine/schema/src/test/java/com/cloud/vm/ItWorkDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/ItWorkDaoImplTest.java new file mode 100644 index 00000000000..04bc125e05f --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/vm/ItWorkDaoImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class ItWorkDaoImplTest { + + @Spy + ItWorkDaoImpl itWorkDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, itWorkDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, itWorkDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(itWorkDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(itWorkDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final ItWorkVO mockedVO = Mockito.mock(ItWorkVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), itWorkDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(itWorkDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/ConsoleSessionDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/ConsoleSessionDaoImplTest.java new file mode 100644 index 00000000000..c9919e26af6 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/vm/dao/ConsoleSessionDaoImplTest.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.ConsoleSessionVO; + +@RunWith(MockitoJUnitRunner.class) +public class ConsoleSessionDaoImplTest { + + @Spy + ConsoleSessionDaoImpl consoleSessionDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, consoleSessionDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, consoleSessionDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(consoleSessionDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(consoleSessionDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final ConsoleSessionVO mockedVO = Mockito.mock(ConsoleSessionVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), consoleSessionDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(consoleSessionDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/NicDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/NicDaoImplTest.java new file mode 100644 index 00000000000..506fdb7fc92 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/vm/dao/NicDaoImplTest.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.collections.CollectionUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.NicVO; + +@RunWith(MockitoJUnitRunner.class) +public class NicDaoImplTest { + + @Spy + NicDaoImpl nicDaoImplSpy; + + @Test + public void testSearchRemovedByVmsNoVms() { + Assert.assertTrue(CollectionUtils.isEmpty(nicDaoImplSpy.searchRemovedByVms( + new ArrayList<>(), 100L))); + Assert.assertTrue(CollectionUtils.isEmpty(nicDaoImplSpy.searchRemovedByVms( + null, 100L))); + } + + @Test + public void testSearchRemovedByVms() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(new ArrayList<>()).when(nicDaoImplSpy).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.any(Filter.class), Mockito.eq(null), + Mockito.eq(false)); + Mockito.when(nicDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final NicVO mockedVO = Mockito.mock(NicVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + nicDaoImplSpy.searchRemovedByVms(List.of(1L, 2L), batchSize); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(nicDaoImplSpy, Mockito.times(1)).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.any(Filter.class), Mockito.eq(null), + Mockito.eq(false)); + } +} diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/NicExtraDhcpOptionDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/NicExtraDhcpOptionDaoImplTest.java new file mode 100644 
index 00000000000..7a1e32e95ca --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/vm/dao/NicExtraDhcpOptionDaoImplTest.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.NicExtraDhcpOptionVO; + +@RunWith(MockitoJUnitRunner.class) +public class NicExtraDhcpOptionDaoImplTest { + + @Spy + NicExtraDhcpOptionDaoImpl nicExtraDhcpOptionDaoImplSpy; + + @Test + public void testExpungeByNicListNoVms() { + Assert.assertEquals(0, nicExtraDhcpOptionDaoImplSpy.expungeByNicList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, nicExtraDhcpOptionDaoImplSpy.expungeByNicList( + null, 100L)); + } + + @Test + public void testExpungeByNicList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); 
+ Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 0 : batchSize.intValue(); + }).when(nicExtraDhcpOptionDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(nicExtraDhcpOptionDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final NicExtraDhcpOptionVO mockedVO = Mockito.mock(NicExtraDhcpOptionVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), nicExtraDhcpOptionDaoImplSpy.expungeByNicList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("nicIds", array); + Mockito.verify(nicExtraDhcpOptionDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/NicSecondaryIpDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/NicSecondaryIpDaoImplTest.java new file mode 100644 index 00000000000..a9f798dbc01 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/vm/dao/NicSecondaryIpDaoImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class NicSecondaryIpDaoImplTest { + + @Spy + NicSecondaryIpDaoImpl nicSecondaryIpDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, nicSecondaryIpDaoImplSpy.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, nicSecondaryIpDaoImplSpy.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(nicSecondaryIpDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(nicSecondaryIpDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final NicSecondaryIpVO mockedVO = Mockito.mock(NicSecondaryIpVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), nicSecondaryIpDaoImplSpy.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(nicSecondaryIpDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java index 4a32dc08359..43679081550 100644 --- a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java @@ -30,6 +30,8 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.util.ArrayList; +import java.util.Calendar; import java.util.Date; import org.joda.time.DateTime; @@ -37,10 +39,14 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; +import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.Spy; import com.cloud.utils.Pair; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -199,4 +205,29 @@ public class VMInstanceDaoImplTest { assertTrue(result); } + + @Test + public void testSearchRemovedByRemoveDate() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + 
Mockito.when(sb.create()).thenReturn(sc); + Mockito.when(vmInstanceDao.createSearchBuilder()).thenReturn(sb); + final VMInstanceVO mockedVO = Mockito.mock(VMInstanceVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + Mockito.doReturn(new ArrayList<>()).when(vmInstanceDao).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.any(Filter.class), Mockito.eq(null), + Mockito.eq(false)); + Calendar cal = Calendar.getInstance(); + Date endDate = new Date(); + cal.setTime(endDate); + cal.add(Calendar.DATE, -1 * 10); + Date startDate = cal.getTime(); + vmInstanceDao.searchRemovedByRemoveDate(startDate, endDate, 50L, new ArrayList<>()); + Mockito.verify(sc).setParameters("startDate", startDate); + Mockito.verify(sc).setParameters("endDate", endDate); + Mockito.verify(sc, Mockito.never()).setParameters(Mockito.eq("skippedVmIds"), Mockito.any()); + Mockito.verify(vmInstanceDao, Mockito.times(1)).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.any(Filter.class), Mockito.eq(null), + Mockito.eq(false)); + } } diff --git a/engine/schema/src/test/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImplTest.java new file mode 100644 index 00000000000..e71518080d2 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImplTest.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm.snapshot.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.collections.CollectionUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.snapshot.VMSnapshotVO; + +@RunWith(MockitoJUnitRunner.class) +public class VMSnapshotDaoImplTest { + + @Spy + VMSnapshotDaoImpl vmSnapshotDaoImplSpy; + + @Test + public void testSearchRemovedByVmsNoVms() { + Assert.assertTrue(CollectionUtils.isEmpty(vmSnapshotDaoImplSpy.searchRemovedByVms( + new ArrayList<>(), 100L))); + Assert.assertTrue(CollectionUtils.isEmpty(vmSnapshotDaoImplSpy.searchRemovedByVms( + null, 100L))); + } + + @Test + public void testSearchRemovedByVms() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(new ArrayList<>()).when(vmSnapshotDaoImplSpy).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.any(Filter.class), Mockito.eq(null), + Mockito.eq(false)); + Mockito.when(vmSnapshotDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final VMSnapshotVO mockedVO = Mockito.mock(VMSnapshotVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long 
batchSize = 50L; + vmSnapshotDaoImplSpy.searchRemovedByVms(List.of(1L, 2L), batchSize); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(vmSnapshotDaoImplSpy, Mockito.times(1)).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.any(Filter.class), Mockito.eq(null), + Mockito.eq(false)); + } +} diff --git a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImplTest.java b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImplTest.java new file mode 100644 index 00000000000..85240ab4a05 --- /dev/null +++ b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.db; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class SnapshotDataStoreDaoImplTest { + + @Spy + SnapshotDataStoreDaoImpl snapshotDataStoreDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, snapshotDataStoreDaoImplSpy.expungeBySnapshotList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, snapshotDataStoreDaoImplSpy.expungeBySnapshotList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(snapshotDataStoreDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(snapshotDataStoreDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final SnapshotDataStoreVO mockedVO = Mockito.mock(SnapshotDataStoreVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), snapshotDataStoreDaoImplSpy.expungeBySnapshotList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("snapshotIds", array); + Mockito.verify(snapshotDataStoreDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh index fb12ad752a2..4205e06aa69 100644 --- a/engine/schema/templateConfig.sh +++ b/engine/schema/templateConfig.sh @@ -60,22 +60,23 @@ function createMetadataFile() { section="${template%%:*}" hvName=$(getGenericName $section) - templatename="systemvm-${section}-${VERSION}" - checksum=$(getChecksum "$fileData" "$VERSION-$hvName") downloadurl="${template#*:}" + arch=$(echo ${downloadurl#*"/systemvmtemplate-$VERSION-"} | cut -d'-' -f 1) + templatename="systemvm-${section%.*}-${VERSION}-${arch}" + checksum=$(getChecksum "$fileData" "$VERSION-${arch}-$hvName") filename=$(echo ${downloadurl##*'/'}) - echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\n" >> $METADATAFILE + echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\narch = $arch\n" >> $METADATAFILE done } declare -a templates getTemplateVersion $1 -templates=( "kvm:https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-$VERSION-kvm.qcow2.bz2" - "vmware:https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-$VERSION-vmware.ova" - 
"xenserver:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-xen.vhd.bz2" - "hyperv:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-hyperv.vhd.zip" - "lxc:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-kvm.qcow2.bz2" - "ovm3:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-ovm.raw.bz2" ) +templates=( "kvm:https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-$VERSION-x86_64-kvm.qcow2.bz2" + "vmware:https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-$VERSION-x86_64-vmware.ova" + "xenserver:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-x86_64-xen.vhd.bz2" + "hyperv:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-x86_64-hyperv.vhd.zip" + "lxc:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-x86_64-kvm.qcow2.bz2" + "ovm3:https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$VERSION-x86_64-ovm.raw.bz2" ) PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/" mkdir -p $PARENTPATH diff --git a/engine/service/pom.xml b/engine/service/pom.xml index a3e07890bb6..34221e1001d 100644 --- a/engine/service/pom.xml +++ b/engine/service/pom.xml @@ -66,6 +66,11 @@ engine + + org.apache.maven.plugins + maven-war-plugin + 3.4.0 + org.eclipse.jetty jetty-maven-plugin diff --git a/engine/storage/cache/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-cache-core-context.xml b/engine/storage/cache/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-cache-core-context.xml index f98b06da2a8..2038cfac945 100644 --- a/engine/storage/cache/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-cache-core-context.xml +++ b/engine/storage/cache/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-cache-core-context.xml @@ -34,5 +34,5 @@ - 
+ diff --git a/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java b/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java index e1d51120efa..58cc341a87b 100644 --- a/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java +++ b/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java @@ -22,6 +22,8 @@ import static com.cloud.network.NetworkModel.CONFIGDATA_DIR; import static com.cloud.network.NetworkModel.CONFIGDATA_FILE; import static com.cloud.network.NetworkModel.PASSWORD_FILE; import static com.cloud.network.NetworkModel.USERDATA_FILE; +import static com.cloud.network.NetworkService.DEFAULT_MTU; +import static org.apache.cloudstack.storage.configdrive.ConfigDriveUtils.mergeJsonArraysAndUpdateObject; import java.io.File; import java.io.IOException; @@ -33,6 +35,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import com.cloud.network.Network; +import com.cloud.vm.NicProfile; +import com.googlecode.ipv6.IPv6Network; import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.MapUtils; import org.apache.commons.io.FileUtils; @@ -81,7 +86,7 @@ public class ConfigDriveBuilder { /** * Read the content of a {@link File} and convert it to a String in base 64. - * We expect the content of the file to be encoded using {@link StandardCharsets#US_ASC} + * We expect the content of the file to be encoded using {@link StandardCharsets#US_ASCII} */ public static String fileToBase64String(File isoFile) throws IOException { byte[] encoded = Base64.encodeBase64(FileUtils.readFileToByteArray(isoFile)); @@ -108,9 +113,9 @@ public class ConfigDriveBuilder { * This method will build the metadata files required by OpenStack driver. Then, an ISO is going to be generated and returned as a String in base 64. 
* If vmData is null, we throw a {@link CloudRuntimeException}. Moreover, {@link IOException} are captured and re-thrown as {@link CloudRuntimeException}. */ - public static String buildConfigDrive(List vmData, String isoFileName, String driveLabel, Map customUserdataParams) { - if (vmData == null) { - throw new CloudRuntimeException("No VM metadata provided"); + public static String buildConfigDrive(List nics, List vmData, String isoFileName, String driveLabel, Map customUserdataParams, Map> supportedServices) { + if (vmData == null && nics == null) { + throw new CloudRuntimeException("No VM metadata and nic profile provided"); } Path tempDir = null; @@ -121,10 +126,19 @@ public class ConfigDriveBuilder { File openStackFolder = new File(tempDirName + ConfigDrive.openStackConfigDriveName); - writeVendorAndNetworkEmptyJsonFile(openStackFolder); - writeVmMetadata(vmData, tempDirName, openStackFolder, customUserdataParams); + writeVendorEmptyJsonFile(openStackFolder); + writeNetworkData(nics, supportedServices, openStackFolder); + for (NicProfile nic: nics) { + if (supportedServices.get(nic.getId()).contains(Network.Service.UserData)) { + if (vmData == null) { + throw new CloudRuntimeException("No VM metadata provided"); + } + writeVmMetadata(vmData, tempDirName, openStackFolder, customUserdataParams); - linkUserData(tempDirName); + linkUserData(tempDirName); + break; + } + } return generateAndRetrieveIsoAsBase64Iso(isoFileName, driveLabel, tempDirName); } catch (IOException e) { @@ -212,18 +226,36 @@ public class ConfigDriveBuilder { } /** - * Writes the following empty JSON files: - *
    - *
  • vendor_data.json - *
  • network_data.json - *
- * - * If the folder does not exist and we cannot create it, we throw a {@link CloudRuntimeException}. + * First we generate a JSON object using {@link #getNetworkDataJsonObjectForNic(NicProfile, List)}, then we write it to a file called "network_data.json". */ - static void writeVendorAndNetworkEmptyJsonFile(File openStackFolder) { + static void writeNetworkData(List nics, Map> supportedServices, File openStackFolder) { + JsonObject finalNetworkData = new JsonObject(); + if (needForGeneratingNetworkData(supportedServices)) { + for (NicProfile nic : nics) { + List supportedService = supportedServices.get(nic.getId()); + JsonObject networkData = getNetworkDataJsonObjectForNic(nic, supportedService); + + mergeJsonArraysAndUpdateObject(finalNetworkData, networkData, "links", "id", "type"); + mergeJsonArraysAndUpdateObject(finalNetworkData, networkData, "networks", "id", "type"); + mergeJsonArraysAndUpdateObject(finalNetworkData, networkData, "services", "address", "type"); + } + } + + writeFile(openStackFolder, "network_data.json", finalNetworkData.toString()); + } + + static boolean needForGeneratingNetworkData(Map> supportedServices) { + return supportedServices.values().stream().anyMatch(services -> services.contains(Network.Service.Dhcp) || services.contains(Network.Service.Dns)); + } + + /** + * Writes an empty JSON file named vendor_data.json in openStackFolder + * + * If the folder does not exist, and we cannot create it, we throw a {@link CloudRuntimeException}. + */ + static void writeVendorEmptyJsonFile(File openStackFolder) { if (openStackFolder.exists() || openStackFolder.mkdirs()) { writeFile(openStackFolder, "vendor_data.json", "{}"); - writeFile(openStackFolder, "network_data.json", "{}"); } else { throw new CloudRuntimeException("Failed to create folder " + openStackFolder); } @@ -250,6 +282,120 @@ public class ConfigDriveBuilder { return metaData; } + /** + * Creates the {@link JsonObject} using @param nic's metadata. 
We expect the JSONObject to have the following entries: + *
    + *
  • links
  • + *
  • networks
  • + *
  • services
  • + *
+ */ + static JsonObject getNetworkDataJsonObjectForNic(NicProfile nic, List supportedServices) { + JsonObject networkData = new JsonObject(); + + JsonArray links = getLinksJsonArrayForNic(nic); + JsonArray networks = getNetworksJsonArrayForNic(nic); + if (links.size() > 0) { + networkData.add("links", links); + } + if (networks.size() > 0) { + networkData.add("networks", networks); + } + + JsonArray services = getServicesJsonArrayForNic(nic); + if (services.size() > 0) { + networkData.add("services", services); + } + + return networkData; + } + + static JsonArray getLinksJsonArrayForNic(NicProfile nic) { + JsonArray links = new JsonArray(); + if (StringUtils.isNotBlank(nic.getMacAddress())) { + JsonObject link = new JsonObject(); + link.addProperty("ethernet_mac_address", nic.getMacAddress()); + link.addProperty("id", String.format("eth%d", nic.getDeviceId())); + link.addProperty("mtu", nic.getMtu() != null ? nic.getMtu() : DEFAULT_MTU); + link.addProperty("type", "phy"); + links.add(link); + } + return links; + } + + static JsonArray getNetworksJsonArrayForNic(NicProfile nic) { + JsonArray networks = new JsonArray(); + if (StringUtils.isNotBlank(nic.getIPv4Address())) { + JsonObject ipv4Network = new JsonObject(); + ipv4Network.addProperty("id", String.format("eth%d", nic.getDeviceId())); + ipv4Network.addProperty("ip_address", nic.getIPv4Address()); + ipv4Network.addProperty("link", String.format("eth%d", nic.getDeviceId())); + ipv4Network.addProperty("netmask", nic.getIPv4Netmask()); + ipv4Network.addProperty("network_id", nic.getUuid()); + ipv4Network.addProperty("type", "ipv4"); + + JsonArray ipv4RouteArray = new JsonArray(); + JsonObject ipv4Route = new JsonObject(); + ipv4Route.addProperty("gateway", nic.getIPv4Gateway()); + ipv4Route.addProperty("netmask", "0.0.0.0"); + ipv4Route.addProperty("network", "0.0.0.0"); + ipv4RouteArray.add(ipv4Route); + + ipv4Network.add("routes", ipv4RouteArray); + + networks.add(ipv4Network); + } + + if 
(StringUtils.isNotBlank(nic.getIPv6Address())) { + JsonObject ipv6Network = new JsonObject(); + ipv6Network.addProperty("id", String.format("eth%d", nic.getDeviceId())); + ipv6Network.addProperty("ip_address", nic.getIPv6Address()); + ipv6Network.addProperty("link", String.format("eth%d", nic.getDeviceId())); + ipv6Network.addProperty("netmask", IPv6Network.fromString(nic.getIPv6Cidr()).getNetmask().toString()); + ipv6Network.addProperty("network_id", nic.getUuid()); + ipv6Network.addProperty("type", "ipv6"); + + JsonArray ipv6RouteArray = new JsonArray(); + JsonObject ipv6Route = new JsonObject(); + ipv6Route.addProperty("gateway", nic.getIPv6Gateway()); + ipv6Route.addProperty("netmask", "0"); + ipv6Route.addProperty("network", "::"); + ipv6RouteArray.add(ipv6Route); + + ipv6Network.add("routes", ipv6RouteArray); + + networks.add(ipv6Network); + } + return networks; + } + + static JsonArray getServicesJsonArrayForNic(NicProfile nic) { + JsonArray services = new JsonArray(); + if (StringUtils.isNotBlank(nic.getIPv4Dns1())) { + services.add(getDnsServiceObject(nic.getIPv4Dns1())); + } + + if (StringUtils.isNotBlank(nic.getIPv4Dns2())) { + services.add(getDnsServiceObject(nic.getIPv4Dns2())); + } + + if (StringUtils.isNotBlank(nic.getIPv6Dns1())) { + services.add(getDnsServiceObject(nic.getIPv6Dns1())); + } + + if (StringUtils.isNotBlank(nic.getIPv6Dns2())) { + services.add(getDnsServiceObject(nic.getIPv6Dns2())); + } + return services; + } + + private static JsonObject getDnsServiceObject(String dnsAddress) { + JsonObject dnsService = new JsonObject(); + dnsService.addProperty("address", dnsAddress); + dnsService.addProperty("type", "dns"); + return dnsService; + } + static void createFileInTempDirAnAppendOpenStackMetadataToJsonObject(String tempDirName, JsonObject metaData, String dataType, String fileName, String content, Map customUserdataParams) { if (StringUtils.isBlank(dataType)) { return; diff --git 
a/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveUtils.java b/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveUtils.java new file mode 100644 index 00000000000..8847497f193 --- /dev/null +++ b/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveUtils.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.configdrive; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +public class ConfigDriveUtils { + + static void mergeJsonArraysAndUpdateObject(JsonObject finalObject, JsonObject newObj, String memberName, String... keys) { + JsonArray existingMembers = finalObject.has(memberName) ? finalObject.get(memberName).getAsJsonArray() : new JsonArray(); + JsonArray newMembers = newObj.has(memberName) ? 
newObj.get(memberName).getAsJsonArray() : new JsonArray(); + + if (existingMembers.size() > 0 || newMembers.size() > 0) { + JsonArray finalMembers = new JsonArray(); + Set idSet = new HashSet<>(); + for (JsonElement element : existingMembers.getAsJsonArray()) { + JsonObject elementObject = element.getAsJsonObject(); + String key = Arrays.stream(keys).map(elementObject::get).map(JsonElement::getAsString).reduce((a, b) -> a + "-" + b).orElse(""); + idSet.add(key); + finalMembers.add(element); + } + for (JsonElement element : newMembers.getAsJsonArray()) { + JsonObject elementObject = element.getAsJsonObject(); + String key = Arrays.stream(keys).map(elementObject::get).map(JsonElement::getAsString).reduce((a, b) -> a + "-" + b).orElse(""); + if (!idSet.contains(key)) { + finalMembers.add(element); + } + } + finalObject.add(memberName, finalMembers); + } + } + +} diff --git a/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilderTest.java b/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilderTest.java index eff881065c2..3effdb5ba21 100644 --- a/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilderTest.java +++ b/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilderTest.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.configdrive; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import java.io.File; @@ -27,14 +28,21 @@ import java.io.IOException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import 
com.cloud.network.Network; +import com.cloud.vm.NicProfile; +import com.google.gson.JsonParser; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.junit.Assert; +import org.junit.BeforeClass; import org.junit.Test; +import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.mockito.InOrder; import org.mockito.MockedConstruction; @@ -49,6 +57,13 @@ import com.google.gson.JsonObject; @RunWith(MockitoJUnitRunner.class) public class ConfigDriveBuilderTest { + private static Map> supportedServices; + + @BeforeClass + public static void beforeClass() throws Exception { + supportedServices = Map.of(1L, List.of(Network.Service.UserData, Network.Service.Dhcp, Network.Service.Dns)); + } + @Test public void writeFileTest() { try (MockedStatic fileUtilsMocked = Mockito.mockStatic(FileUtils.class)) { @@ -112,16 +127,16 @@ public class ConfigDriveBuilderTest { } @Test(expected = CloudRuntimeException.class) - public void buildConfigDriveTestNoVmData() { - ConfigDriveBuilder.buildConfigDrive(null, "teste", "C:", null); + public void buildConfigDriveTestNoVmDataAndNic() { + ConfigDriveBuilder.buildConfigDrive(null, null, "teste", "C:", null, null); } @Test(expected = CloudRuntimeException.class) public void buildConfigDriveTestIoException() { try (MockedStatic configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) { - configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorAndNetworkEmptyJsonFile(nullable(File.class))).thenThrow(CloudRuntimeException.class); - Mockito.when(ConfigDriveBuilder.buildConfigDrive(new ArrayList<>(), "teste", "C:", null)).thenCallRealMethod(); - ConfigDriveBuilder.buildConfigDrive(new ArrayList<>(), "teste", "C:", null); + configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorEmptyJsonFile(nullable(File.class))).thenThrow(CloudRuntimeException.class); + Mockito.when(ConfigDriveBuilder.buildConfigDrive(null, new ArrayList<>(), "teste", "C:", null, 
supportedServices)).thenCallRealMethod(); + ConfigDriveBuilder.buildConfigDrive(null, new ArrayList<>(), "teste", "C:", null, supportedServices); } } @@ -129,22 +144,26 @@ public class ConfigDriveBuilderTest { public void buildConfigDriveTest() { try (MockedStatic configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) { - configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorAndNetworkEmptyJsonFile(Mockito.any(File.class))).then(invocationOnMock -> null); + configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorEmptyJsonFile(Mockito.any(File.class))).then(invocationOnMock -> null); configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVmMetadata(Mockito.anyList(), Mockito.anyString(), Mockito.any(File.class), anyMap())).then(invocationOnMock -> null); configDriveBuilderMocked.when(() -> ConfigDriveBuilder.linkUserData((Mockito.anyString()))).then(invocationOnMock -> null); configDriveBuilderMocked.when(() -> ConfigDriveBuilder.generateAndRetrieveIsoAsBase64Iso(Mockito.anyString(), Mockito.anyString(), Mockito.anyString())).thenAnswer(invocation -> "mockIsoDataBase64"); - //force execution of real method - Mockito.when(ConfigDriveBuilder.buildConfigDrive(new ArrayList<>(), "teste", "C:", null)).thenCallRealMethod(); - String returnedIsoData = ConfigDriveBuilder.buildConfigDrive(new ArrayList<>(), "teste", "C:", null); + NicProfile mockedNicProfile = Mockito.mock(NicProfile.class); + Mockito.when(mockedNicProfile.getId()).thenReturn(1L); + + //force execution of real method + Mockito.when(ConfigDriveBuilder.buildConfigDrive(List.of(mockedNicProfile), new ArrayList<>(), "teste", "C:", null, supportedServices)).thenCallRealMethod(); + + String returnedIsoData = ConfigDriveBuilder.buildConfigDrive(List.of(mockedNicProfile), new ArrayList<>(), "teste", "C:", null, supportedServices); Assert.assertEquals("mockIsoDataBase64", returnedIsoData); configDriveBuilderMocked.verify(() -> { - 
ConfigDriveBuilder.writeVendorAndNetworkEmptyJsonFile(Mockito.any(File.class)); + ConfigDriveBuilder.writeVendorEmptyJsonFile(Mockito.any(File.class)); ConfigDriveBuilder.writeVmMetadata(Mockito.anyList(), Mockito.anyString(), Mockito.any(File.class), anyMap()); ConfigDriveBuilder.linkUserData(Mockito.anyString()); ConfigDriveBuilder.generateAndRetrieveIsoAsBase64Iso(Mockito.anyString(), Mockito.anyString(), Mockito.anyString()); @@ -153,23 +172,23 @@ public class ConfigDriveBuilderTest { } @Test(expected = CloudRuntimeException.class) - public void writeVendorAndNetworkEmptyJsonFileTestCannotCreateOpenStackFolder() { + public void writeVendorEmptyJsonFileTestCannotCreateOpenStackFolder() { File folderFileMock = Mockito.mock(File.class); Mockito.doReturn(false).when(folderFileMock).mkdirs(); - ConfigDriveBuilder.writeVendorAndNetworkEmptyJsonFile(folderFileMock); + ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock); } @Test(expected = CloudRuntimeException.class) - public void writeVendorAndNetworkEmptyJsonFileTest() { + public void writeVendorEmptyJsonFileTest() { File folderFileMock = Mockito.mock(File.class); Mockito.doReturn(false).when(folderFileMock).mkdirs(); - ConfigDriveBuilder.writeVendorAndNetworkEmptyJsonFile(folderFileMock); + ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock); } @Test - public void writeVendorAndNetworkEmptyJsonFileTestCreatingFolder() { + public void writeVendorEmptyJsonFileTestCreatingFolder() { try (MockedStatic configDriveBuilderMocked = Mockito.mockStatic(ConfigDriveBuilder.class)) { File folderFileMock = Mockito.mock(File.class); @@ -177,9 +196,9 @@ public class ConfigDriveBuilderTest { Mockito.doReturn(true).when(folderFileMock).mkdirs(); //force execution of real method - configDriveBuilderMocked.when(() -> ConfigDriveBuilder.writeVendorAndNetworkEmptyJsonFile(folderFileMock)).thenCallRealMethod(); + configDriveBuilderMocked.when(() -> 
ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock)).thenCallRealMethod(); - ConfigDriveBuilder.writeVendorAndNetworkEmptyJsonFile(folderFileMock); + ConfigDriveBuilder.writeVendorEmptyJsonFile(folderFileMock); Mockito.verify(folderFileMock).exists(); Mockito.verify(folderFileMock).mkdirs(); @@ -501,4 +520,143 @@ public class ConfigDriveBuilderTest { Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).getCanonicalPath(); } } + + @Test + public void testWriteNetworkData() throws Exception { + // Setup + NicProfile nicp = mock(NicProfile.class); + Mockito.when(nicp.getId()).thenReturn(1L); + + Mockito.when(nicp.getMacAddress()).thenReturn("00:00:00:00:00:00"); + Mockito.when(nicp.getMtu()).thenReturn(2000); + + Mockito.when(nicp.getIPv4Address()).thenReturn("172.31.0.10"); + Mockito.when(nicp.getDeviceId()).thenReturn(1); + Mockito.when(nicp.getIPv4Netmask()).thenReturn("255.255.255.0"); + Mockito.when(nicp.getUuid()).thenReturn("NETWORK UUID"); + Mockito.when(nicp.getIPv4Gateway()).thenReturn("172.31.0.1"); + + + Mockito.when(nicp.getIPv6Address()).thenReturn("2001:db8:0:1234:0:567:8:1"); + Mockito.when(nicp.getIPv6Cidr()).thenReturn("2001:db8:0:1234:0:567:8:1/64"); + Mockito.when(nicp.getIPv6Gateway()).thenReturn("2001:db8:0:1234:0:567:8::1"); + + Mockito.when(nicp.getIPv4Dns1()).thenReturn("8.8.8.8"); + Mockito.when(nicp.getIPv4Dns2()).thenReturn("1.1.1.1"); + Mockito.when(nicp.getIPv6Dns1()).thenReturn("2001:4860:4860::8888"); + Mockito.when(nicp.getIPv6Dns2()).thenReturn("2001:4860:4860::8844"); + + + List services1 = Arrays.asList(Network.Service.Dhcp, Network.Service.Dns); + + Map> supportedServices = new HashMap<>(); + supportedServices.put(1L, services1); + + TemporaryFolder folder = new TemporaryFolder(); + folder.create(); + File openStackFolder = folder.newFolder("openStack"); + + // Expected JSON structure + String expectedJson = "{" + + " \"links\": [" + + " {" + + " \"ethernet_mac_address\": \"00:00:00:00:00:00\"," + + " \"id\": 
\"eth1\"," + + " \"mtu\": 2000," + + " \"type\": \"phy\"" + + " }" + + " ]," + + " \"networks\": [" + + " {" + + " \"id\": \"eth1\"," + + " \"ip_address\": \"172.31.0.10\"," + + " \"link\": \"eth1\"," + + " \"netmask\": \"255.255.255.0\"," + + " \"network_id\": \"NETWORK UUID\"," + + " \"type\": \"ipv4\"," + + " \"routes\": [" + + " {" + + " \"gateway\": \"172.31.0.1\"," + + " \"netmask\": \"0.0.0.0\"," + + " \"network\": \"0.0.0.0\"" + + " }" + + " ]" + + " }," + + " {" + + " \"id\": \"eth1\"," + + " \"ip_address\": \"2001:db8:0:1234:0:567:8:1\"," + + " \"link\": \"eth1\"," + + " \"netmask\": \"64\"," + + " \"network_id\": \"NETWORK UUID\"," + + " \"type\": \"ipv6\"," + + " \"routes\": [" + + " {" + + " \"gateway\": \"2001:db8:0:1234:0:567:8::1\"," + + " \"netmask\": \"0\"," + + " \"network\": \"::\"" + + " }" + + " ]" + + " }" + + " ]," + + " \"services\": [" + + " {" + + " \"address\": \"8.8.8.8\"," + + " \"type\": \"dns\"" + + " }," + + " {" + + " \"address\": \"1.1.1.1\"," + + " \"type\": \"dns\"" + + " }," + + " {" + + " \"address\": \"2001:4860:4860::8888\"," + + " \"type\": \"dns\"" + + " }," + + " {" + + " \"address\": \"2001:4860:4860::8844\"," + + " \"type\": \"dns\"" + + " }" + + " ]" + + "}"; + + // Action + ConfigDriveBuilder.writeNetworkData(Arrays.asList(nicp), supportedServices, openStackFolder); + + // Verify + File networkDataFile = new File(openStackFolder, "network_data.json"); + String content = FileUtils.readFileToString(networkDataFile, StandardCharsets.UTF_8); + JsonObject actualJson = new JsonParser().parse(content).getAsJsonObject(); + JsonObject expectedJsonObject = new JsonParser().parse(expectedJson).getAsJsonObject(); + + Assert.assertEquals(expectedJsonObject, actualJson); + folder.delete(); + } + + @Test + public void testWriteNetworkDataEmptyJson() throws Exception { + // Setup + NicProfile nicp = mock(NicProfile.class); + List services1 = Collections.emptyList(); + + Map> supportedServices = new HashMap<>(); + 
supportedServices.put(1L, services1); + + TemporaryFolder folder = new TemporaryFolder(); + folder.create(); + File openStackFolder = folder.newFolder("openStack"); + + // Expected JSON structure + String expectedJson = "{}"; + + // Action + ConfigDriveBuilder.writeNetworkData(Arrays.asList(nicp), supportedServices, openStackFolder); + + // Verify + File networkDataFile = new File(openStackFolder, "network_data.json"); + String content = FileUtils.readFileToString(networkDataFile, StandardCharsets.UTF_8); + JsonObject actualJson = new JsonParser().parse(content).getAsJsonObject(); + JsonObject expectedJsonObject = new JsonParser().parse(expectedJson).getAsJsonObject(); + + Assert.assertEquals(expectedJsonObject, actualJson); + folder.delete(); + } } diff --git a/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveUtilsTest.java b/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveUtilsTest.java new file mode 100644 index 00000000000..6e935b951da --- /dev/null +++ b/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveUtilsTest.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.configdrive; + +import static org.apache.cloudstack.storage.configdrive.ConfigDriveUtils.mergeJsonArraysAndUpdateObject; + +import com.google.gson.JsonArray; +import com.google.gson.JsonParser; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import com.google.gson.JsonObject; + +@RunWith(MockitoJUnitRunner.class) +public class ConfigDriveUtilsTest { + + @Test + public void testMergeJsonArraysAndUpdateObjectWithEmptyObjects() { + JsonObject finalObject = new JsonObject(); + JsonObject newObj = new JsonObject(); + mergeJsonArraysAndUpdateObject(finalObject, newObj, "links", "id", "type"); + Assert.assertEquals("{}", finalObject.toString()); + } + + @Test + public void testMergeJsonArraysAndUpdateObjectWithNewMembersAdded() { + JsonObject finalObject = new JsonObject(); + + JsonObject newObj = new JsonObject(); + JsonArray newMembers = new JsonArray(); + JsonObject newMember = new JsonObject(); + newMember.addProperty("id", "eth0"); + newMember.addProperty("type", "phy"); + newMembers.add(newMember); + newObj.add("links", newMembers); + + mergeJsonArraysAndUpdateObject(finalObject, newObj, "links", "id", "type"); + Assert.assertEquals(1, finalObject.getAsJsonArray("links").size()); + JsonObject expectedObj = new JsonParser().parse("{'links': [{'id': 'eth0', 'type': 'phy'}]}").getAsJsonObject(); + Assert.assertEquals(expectedObj, finalObject); + } + + @Test + public void testMergeJsonArraysAndUpdateObjectWithDuplicateMembersIgnored() { + JsonObject finalObject = new JsonObject(); + JsonArray existingMembers = new JsonArray(); + JsonObject existingMember = new JsonObject(); + existingMember.addProperty("id", "eth0"); + existingMember.addProperty("type", "phy"); + existingMembers.add(existingMember); + finalObject.add("links", 
existingMembers); + + JsonObject newObj = new JsonObject(); + newObj.add("links", existingMembers); // same as existingMembers for duplication + + mergeJsonArraysAndUpdateObject(finalObject, newObj, "links", "id", "type"); + Assert.assertEquals(1, finalObject.getAsJsonArray("links").size()); + JsonObject expectedObj = new JsonParser().parse("{'links': [{'id': 'eth0', 'type': 'phy'}]}").getAsJsonObject(); + Assert.assertEquals(expectedObj, finalObject); + } + + @Test + public void testMergeJsonArraysAndUpdateObjectWithDifferentMembers() { + JsonObject finalObject = new JsonObject(); + + JsonArray newMembers = new JsonArray(); + JsonObject newMember = new JsonObject(); + newMember.addProperty("id", "eth0"); + newMember.addProperty("type", "phy"); + newMembers.add(newMember); + finalObject.add("links", newMembers); + + JsonObject newObj = new JsonObject(); + newMembers = new JsonArray(); + newMember = new JsonObject(); + newMember.addProperty("id", "eth1"); + newMember.addProperty("type", "phy"); + newMembers.add(newMember); + newObj.add("links", newMembers); + + mergeJsonArraysAndUpdateObject(finalObject, newObj, "links", "id", "type"); + Assert.assertEquals(2, finalObject.getAsJsonArray("links").size()); + JsonObject expectedObj = new JsonParser().parse("{'links': [{'id': 'eth0', 'type': 'phy'}, {'id': 'eth1', 'type': 'phy'}]}").getAsJsonObject(); + Assert.assertEquals(expectedObj, finalObject); + } + + @Test(expected = NullPointerException.class) + public void testMergeJsonArraysAndUpdateObjectWithNullObjects() { + mergeJsonArraysAndUpdateObject(null, null, "services", "id", "type"); + } +} diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 03aa5b50988..07b53842640 100644 --- 
a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -24,6 +24,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.UUID; @@ -44,6 +45,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; @@ -69,7 +71,6 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -82,7 +83,6 @@ import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo; import com.cloud.agent.api.ModifyTargetsAnswer; import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.storage.CheckStorageAvailabilityCommand; import com.cloud.agent.api.storage.CopyVolumeAnswer; import com.cloud.agent.api.storage.CopyVolumeCommand; import 
com.cloud.agent.api.storage.MigrateVolumeAnswer; @@ -104,6 +104,7 @@ import com.cloud.resource.ResourceState; import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.MigrationOptions; +import com.cloud.storage.ScopeType; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; @@ -141,12 +142,16 @@ import java.util.HashSet; import java.util.stream.Collectors; import org.apache.commons.collections.CollectionUtils; +import static org.apache.cloudstack.vm.UnmanagedVMsManagerImpl.KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME; +import static org.apache.cloudstack.vm.UnmanagedVMsManagerImpl.VM_IMPORT_DEFAULT_TEMPLATE_NAME; + public class StorageSystemDataMotionStrategy implements DataMotionStrategy { protected Logger logger = LogManager.getLogger(getClass()); private static final Random RANDOM = new Random(System.nanoTime()); private static final int LOCK_TIME_IN_SECONDS = 300; private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported."; + @Inject protected AgentManager agentManager; @Inject @@ -684,8 +689,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { - String errMsg = null; - try { HypervisorType hypervisorType = srcVolumeInfo.getHypervisorType(); @@ -696,37 +699,21 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { if (HypervisorType.XenServer.equals(hypervisorType)) { handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo); - } - else { + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + DataTO dataTO = destVolumeInfo.getTO(); + CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(dataTO); + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + 
callback.complete(result); + } else { handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); } } catch (Exception ex) { - errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + + String errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + ex.getMessage(); throw new CloudRuntimeException(errMsg, ex); } - finally { - CopyCmdAnswer copyCmdAnswer; - - if (errMsg != null) { - copyCmdAnswer = new CopyCmdAnswer(errMsg); - } - else { - destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); - - DataTO dataTO = destVolumeInfo.getTO(); - - copyCmdAnswer = new CopyCmdAnswer(dataTO); - } - - CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); - - result.setResult(errMsg); - - callback.complete(result); - } } private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { @@ -845,12 +832,25 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { checkAvailableForMigration(vm); String errMsg = null; + HostVO hostVO = null; try { destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); updatePathFromScsiName(volumeVO); destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); - HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + + // if managed we need to grant access + PrimaryDataStore pds = (PrimaryDataStore)this.dataStoreMgr.getPrimaryDataStore(destVolumeInfo.getDataStore().getUuid()); + if (pds == null) { + throw new CloudRuntimeException("Unable to find primary data store driver for this 
volume"); + } + + // grant access (for managed volumes) + _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + + // re-retrieve volume to get any updated information from grant + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); // migrate the volume via the hypervisor String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); @@ -871,6 +871,18 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new CloudRuntimeException(errMsg, ex); } } finally { + // revoke access (for managed volumes) + if (hostVO != null) { + try { + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + } catch (Exception e) { + logger.warn(String.format("Failed to revoke access for volume 'name=%s,uuid=%s' after a migration attempt", destVolumeInfo.getVolume(), destVolumeInfo.getUuid()), e); + } + } + + // re-retrieve volume to get any updated information from grant + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + CopyCmdAnswer copyCmdAnswer; if (errMsg != null) { copyCmdAnswer = new CopyCmdAnswer(errMsg); @@ -911,16 +923,141 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { HostVO hostVO; - if (srcStoragePoolVO.getClusterId() != null) { - hostVO = getHostInCluster(srcStoragePoolVO.getClusterId()); - } - else { - hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); + // if either source or destination is a HOST-scoped storage pool, the migration MUST be performed on that host + if (ScopeType.HOST.equals(srcVolumeInfo.getDataStore().getScope().getScopeType())) { + hostVO = _hostDao.findById(srcVolumeInfo.getDataStore().getScope().getScopeId()); + } else if (ScopeType.HOST.equals(destVolumeInfo.getDataStore().getScope().getScopeType())) { + hostVO 
= _hostDao.findById(destVolumeInfo.getDataStore().getScope().getScopeId()); + } else { + if (srcStoragePoolVO.getClusterId() != null) { + hostVO = getHostInCluster(srcStoragePoolVO.getClusterId()); + } else { + hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); + } } return hostVO; } + private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snapshotInfo) { + VolumeInfo tempVolumeInfo = null; + VolumeVO tempVolumeVO = null; + try { + tempVolumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", + snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); + tempVolumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + _volumeDao.persist(tempVolumeVO); + tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId()); + + if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { + snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); + // refresh volume info as data could have changed + tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId()); + } else { + throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); + } + return tempVolumeInfo; + } catch (Throwable e) { + try { + if (tempVolumeInfo != null) { + tempVolumeInfo.getDataStore().getDriver().deleteAsync(tempVolumeInfo.getDataStore(), tempVolumeInfo, null); + } + + // cleanup temporary volume + if (tempVolumeVO != null) { + _volumeDao.remove(tempVolumeVO.getId()); + } + } catch (Throwable e2) { + logger.warn("Failed to delete temporary volume created for copy", e2); + } + + throw e; + } + } + + /** + * Simplier logic for copy from snapshot for adaptive driver only. 
+ * @param snapshotInfo + * @param destData + * @param callback + */ + private void handleCopyAsyncToSecondaryStorageAdaptive(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback callback) { + CopyCmdAnswer copyCmdAnswer = null; + DataObject srcFinal = null; + HostVO hostVO = null; + DataStore srcDataStore = null; + boolean tempRequired = false; + + try { + snapshotInfo.processEvent(Event.CopyingRequested); + hostVO = getHost(snapshotInfo); + DataObject destOnStore = destData; + srcDataStore = snapshotInfo.getDataStore(); + int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); + CopyCommand copyCommand = null; + if (!Boolean.parseBoolean(srcDataStore.getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT"))) { + srcFinal = createTemporaryVolumeCopyOfSnapshotAdaptive(snapshotInfo); + tempRequired = true; + } else { + srcFinal = snapshotInfo; + } + + _volumeService.grantAccess(srcFinal, hostVO, srcDataStore); + + DataTO srcTo = srcFinal.getTO(); + + // have to set PATH as extraOptions due to logic in KVM hypervisor processor + HashMap extraDetails = new HashMap<>(); + extraDetails.put(DiskTO.PATH, srcTo.getPath()); + + copyCommand = new CopyCommand(srcFinal.getTO(), destOnStore.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); + copyCommand.setOptions(extraDetails); + copyCmdAnswer = (CopyCmdAnswer)agentManager.send(hostVO.getId(), copyCommand); + } catch (Exception ex) { + String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : "; + logger.warn(msg, ex); + throw new CloudRuntimeException(msg + ex.getMessage(), ex); + } + finally { + // remove access tot he volume that was used + if (srcFinal != null && hostVO != null && srcDataStore != null) { + _volumeService.revokeAccess(srcFinal, hostVO, srcDataStore); + } + + // delete the temporary volume if it was needed + if (srcFinal != null && tempRequired) { + try { + 
srcFinal.getDataStore().getDriver().deleteAsync(srcFinal.getDataStore(), srcFinal, null); + } catch (Throwable e) { + logger.warn("Failed to delete temporary volume created for copy", e); + } + } + + // check we have a reasonable result + String errMsg = null; + if (copyCmdAnswer == null || (!copyCmdAnswer.getResult() && copyCmdAnswer.getDetails() == null)) { + errMsg = "Unable to create template from snapshot"; + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } else if (!copyCmdAnswer.getResult() && StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + errMsg = "Unable to create template from snapshot"; + } else if (!copyCmdAnswer.getResult()) { + errMsg = copyCmdAnswer.getDetails(); + } + + //submit processEvent + if (StringUtils.isEmpty(errMsg)) { + snapshotInfo.processEvent(Event.OperationSuccessed); + } else { + snapshotInfo.processEvent(Event.OperationFailed); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(copyCmdAnswer.getDetails()); + callback.complete(result); + } + } + /** * This function is responsible for copying a snapshot from managed storage to secondary storage. 
This is used in the following two cases: * 1) When creating a template from a snapshot @@ -931,6 +1068,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * @param callback callback for async */ private void handleCopyAsyncToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback callback) { + + // if this flag is set (true or false), we will fall out to use simplier logic for the Adaptive handler + if (snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT") != null) { + handleCopyAsyncToSecondaryStorageAdaptive(snapshotInfo, destData, callback); + return; + } + String errMsg = null; CopyCmdAnswer copyCmdAnswer = null; boolean usingBackendSnapshot = false; @@ -1697,14 +1841,13 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) { int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); - CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, - VirtualMachineManager.ExecuteInSequence.value()); - CopyCmdAnswer copyCmdAnswer; try { _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); Map destDetails = getVolumeDetails(destVolumeInfo); copyCommand.setOptions2(destDetails); @@ -1729,42 +1872,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { return copyCmdAnswer; } - /** - * Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the snapshot) - - * @param volumeVO - * @param snapshotInfo - */ - public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo 
snapshotInfo) { - VolumeVO volumeVO = null; - try { - volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", - snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); - volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); - _volumeDao.persist(volumeVO); - VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); - - if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { - snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); - // refresh volume info as data could have changed - tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); - // save the "temp" volume info into the snapshot details (we need this to clean up at the end) - _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true); - _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true); - // NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO() - // whenever the TemporaryVolumeCopyPath is set. - } else { - throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); - } - } catch (Throwable e) { - // cleanup temporary volume - if (volumeVO != null) { - _volumeDao.remove(volumeVO.getId()); - } - throw e; - } - } - /** * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. 
@@ -1776,13 +1883,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. */ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { - prepTempVolumeForCopyFromSnapshot(snapshotInfo); - return; - - } - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); + try { snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); } @@ -1797,31 +1899,20 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * invocation of createVolumeFromSnapshot(SnapshotInfo). */ private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - VolumeVO volumeVO = null; - // cleanup any temporary volume previously created for copy from a snapshot - if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { - SnapshotDetailsVO tempUuid = null; - tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); - if (tempUuid == null || tempUuid.getValue() == null) { - return; - } - - volumeVO = _volumeDao.findByUuid(tempUuid.getValue()); - if (volumeVO != null) { - _volumeDao.remove(volumeVO.getId()); - } - _snapshotDetailsDao.remove(tempUuid.getId()); - _snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); - return; - } - - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); - try { - snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); - } - finally { - _snapshotDetailsDao.remove(snapshotDetails.getId()); + logger.debug("Cleaning up temporary volume created for copy from a snapshot"); 
+ + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); + + try { + snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); + } + finally { + _snapshotDetailsDao.remove(snapshotDetails.getId()); + } + + } catch (Throwable e) { + logger.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e); } } @@ -1906,7 +1997,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new CloudRuntimeException("Invalid hypervisor type (only KVM supported for this operation at the time being)"); } - verifyLiveMigrationForKVM(volumeDataStoreMap, destHost); + verifyLiveMigrationForKVM(volumeDataStoreMap); VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId()); vmTO.setState(vmInstance.getState()); @@ -1933,7 +2024,10 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { continue; } - if (srcVolumeInfo.getTemplateId() != null) { + VMTemplateVO vmTemplate = _vmTemplateDao.findById(vmInstance.getTemplateId()); + if (srcVolumeInfo.getTemplateId() != null && + Objects.nonNull(vmTemplate) && + !Arrays.asList(KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME, VM_IMPORT_DEFAULT_TEMPLATE_NAME).contains(vmTemplate.getName())) { logger.debug(String.format("Copying template [%s] of volume [%s] from source storage pool [%s] to target storage pool [%s].", srcVolumeInfo.getTemplateId(), srcVolumeInfo.getId(), sourceStoragePool.getId(), destStoragePool.getId())); copyTemplateToTargetFilesystemStorageIfNeeded(srcVolumeInfo, sourceStoragePool, destDataStore, destStoragePool, destHost); } else { @@ -1977,8 +2071,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { MigrateCommand.MigrateDiskInfo migrateDiskInfo; - boolean isNonManagedNfsToNfsOrSharedMountPointToNfs = 
supportStoragePoolType(sourceStoragePool.getPoolType()) && destStoragePool.getPoolType() == StoragePoolType.NetworkFilesystem && !managedStorageDestination; - if (isNonManagedNfsToNfsOrSharedMountPointToNfs) { + boolean isNonManagedToNfs = supportStoragePoolType(sourceStoragePool.getPoolType(), StoragePoolType.Filesystem) && destStoragePool.getPoolType() == StoragePoolType.NetworkFilesystem && !managedStorageDestination; + if (isNonManagedToNfs) { migrateDiskInfo = new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(), MigrateCommand.MigrateDiskInfo.DiskType.FILE, MigrateCommand.MigrateDiskInfo.DriverType.QCOW2, @@ -2152,7 +2246,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { if (srcVolumeInfo.getHypervisorType() == HypervisorType.KVM && srcVolumeInfo.getTemplateId() != null && srcVolumeInfo.getPoolId() != null) { VMTemplateVO template = _vmTemplateDao.findById(srcVolumeInfo.getTemplateId()); - if (template.getFormat() != null && template.getFormat() != Storage.ImageFormat.ISO) { + if (Objects.nonNull(template) && template.getFormat() != null && template.getFormat() != Storage.ImageFormat.ISO) { VMTemplateStoragePoolVO ref = templatePoolDao.findByPoolTemplate(srcVolumeInfo.getPoolId(), srcVolumeInfo.getTemplateId(), null); return ref != null ? ref.getInstallPath() : null; } @@ -2357,9 +2451,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { * At a high level: The source storage cannot be managed and * the destination storages can be all managed or all not managed, not mixed. 
*/ - protected void verifyLiveMigrationForKVM(Map volumeDataStoreMap, Host destHost) { + protected void verifyLiveMigrationForKVM(Map volumeDataStoreMap) { Boolean storageTypeConsistency = null; - Map sourcePools = new HashMap<>(); for (Map.Entry entry : volumeDataStoreMap.entrySet()) { VolumeInfo volumeInfo = entry.getKey(); @@ -2386,47 +2479,6 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { } else if (storageTypeConsistency != destStoragePoolVO.isManaged()) { throw new CloudRuntimeException("Destination storage pools must be either all managed or all not managed"); } - - addSourcePoolToPoolsMap(sourcePools, srcStoragePoolVO, destStoragePoolVO); - } - verifyDestinationStorage(sourcePools, destHost); - } - - /** - * Adds source storage pool to the migration map if the destination pool is not managed and it is NFS. - */ - protected void addSourcePoolToPoolsMap(Map sourcePools, StoragePoolVO srcStoragePoolVO, StoragePoolVO destStoragePoolVO) { - if (destStoragePoolVO.isManaged() || !StoragePoolType.NetworkFilesystem.equals(destStoragePoolVO.getPoolType())) { - logger.trace(String.format("Skipping adding source pool [%s] to map due to destination pool [%s] is managed or not NFS.", srcStoragePoolVO, destStoragePoolVO)); - return; - } - - String sourceStoragePoolUuid = srcStoragePoolVO.getUuid(); - if (!sourcePools.containsKey(sourceStoragePoolUuid)) { - sourcePools.put(sourceStoragePoolUuid, srcStoragePoolVO.getPoolType()); - } - } - - /** - * Perform storage validation on destination host for KVM live storage migrations. 
- * Validate that volume source storage pools are mounted on the destination host prior the migration - * @throws CloudRuntimeException if any source storage pool is not mounted on the destination host - */ - private void verifyDestinationStorage(Map sourcePools, Host destHost) { - if (MapUtils.isNotEmpty(sourcePools)) { - logger.debug("Verifying source pools are already available on destination host " + destHost.getUuid()); - CheckStorageAvailabilityCommand cmd = new CheckStorageAvailabilityCommand(sourcePools); - try { - Answer answer = agentManager.send(destHost.getId(), cmd); - if (answer == null || !answer.getResult()) { - throw new CloudRuntimeException("Storage verification failed on host " - + destHost.getUuid() +": " + answer.getDetails()); - } - } catch (AgentUnavailableException | OperationTimedoutException e) { - e.printStackTrace(); - throw new CloudRuntimeException("Cannot perform storage verification on host " + destHost.getUuid() + - "due to: " + e.getMessage()); - } } } @@ -2497,15 +2549,15 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); - CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); - try { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { + if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) { _volumeService.grantAccess(volumeInfo, hostVO, srcDataStore); } + CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + Map srcDetails = getVolumeDetails(volumeInfo); 
copyCommand.setOptions(srcDetails); @@ -2534,7 +2586,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { throw new CloudRuntimeException(msg + ex.getMessage(), ex); } finally { - if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { + if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) { try { _volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore); } @@ -2629,13 +2681,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { long snapshotId = snapshotInfo.getId(); - // if the snapshot required a temporary volume be created check if the UUID is set so we can - // retrieve the temporary volume's path to use during remote copy - List storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath"); - if (storedDetails != null && storedDetails.size() > 0) { - String value = storedDetails.get(0).getValue(); - snapshotDetails.put(DiskTO.PATH, value); - } else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { + if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath()); } else { snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); @@ -2851,6 +2897,8 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { Map srcDetails = getVolumeDetails(srcVolumeInfo); Map destDetails = getVolumeDetails(destVolumeInfo); + _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(), srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value()); @@ -2893,18 +2941,18 @@ public 
class StorageSystemDataMotionStrategy implements DataMotionStrategy { StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcVolumeInfo.getPoolId()); Map srcDetails = getVolumeDetails(srcVolumeInfo); - CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO, - destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true); - - copyVolumeCommand.setSrcData(srcVolumeInfo.getTO()); - copyVolumeCommand.setSrcDetails(srcDetails); - handleQualityOfServiceForVolumeMigration(srcVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); if (srcVolumeDetached) { _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); } + CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO, + destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true); + + copyVolumeCommand.setSrcData(srcVolumeInfo.getTO()); + copyVolumeCommand.setSrcDetails(srcDetails); + CopyVolumeAnswer copyVolumeAnswer = (CopyVolumeAnswer)agentManager.send(hostVO.getId(), copyVolumeCommand); if (copyVolumeAnswer == null || !copyVolumeAnswer.getResult()) { @@ -2976,19 +3024,20 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { srcData = cacheData; } - CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); - try { + CopyCommand copyCommand = null; if (Snapshot.LocationType.PRIMARY.equals(locationType)) { _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore()); Map srcDetails = getSnapshotDetails(snapshotInfo); + copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); copyCommand.setOptions(srcDetails); + } else { + 
_volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); + copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); } - _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); - Map destDetails = getVolumeDetails(volumeInfo); copyCommand.setOptions2(destDetails); diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java index 56e0948e593..67e3ea844d5 100755 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java @@ -21,7 +21,6 @@ package org.apache.cloudstack.storage.motion; import static org.mockito.Mockito.when; import java.lang.reflect.Field; -import java.lang.reflect.Modifier; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.never; @@ -55,8 +54,6 @@ public class AncientDataMotionStrategyTest { @Mock PrimaryDataStoreTO dataStoreTO; @Mock - ConfigKey vmwareKey; - @Mock StorageManager storageManager; @Mock StoragePool storagePool; @@ -66,9 +63,7 @@ public class AncientDataMotionStrategyTest { @Before public void setup() throws Exception { - replaceVmwareCreateCloneFullField(); - - when(vmwareKey.valueIn(POOL_ID)).thenReturn(FULL_CLONE_FLAG); + overrideDefaultConfigValue(StorageManager.VmwareCreateCloneFull, String.valueOf(FULL_CLONE_FLAG)); when(dataTO.getHypervisorType()).thenReturn(HypervisorType.VMware); when(dataTO.getDataStore()).thenReturn(dataStoreTO); @@ -76,14 +71,10 @@ public class AncientDataMotionStrategyTest { when(storageManager.getStoragePool(POOL_ID)).thenReturn(storagePool); } - private void replaceVmwareCreateCloneFullField() throws Exception 
{ - Field field = StorageManager.class.getDeclaredField("VmwareCreateCloneFull"); - field.setAccessible(true); - // remove final modifier from field - Field modifiersField = Field.class.getDeclaredField("modifiers"); - modifiersField.setAccessible(true); - modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); - field.set(null, vmwareKey); + private void overrideDefaultConfigValue(final ConfigKey configKey, final String value) throws IllegalAccessException, NoSuchFieldException { + final Field f = ConfigKey.class.getDeclaredField("_defaultValue"); + f.setAccessible(true); + f.set(configKey, value); } @Test diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java index 87a2288cfdc..b7468195f5d 100644 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java @@ -476,19 +476,19 @@ public class KvmNonManagedStorageSystemDataMotionTest { @Test public void testVerifyLiveMigrationMapForKVM() { - kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2); + kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap); } @Test(expected = CloudRuntimeException.class) public void testVerifyLiveMigrationMapForKVMNotExistingSource() { when(primaryDataStoreDao.findById(POOL_1_ID)).thenReturn(null); - kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2); + kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap); } @Test(expected = CloudRuntimeException.class) public void testVerifyLiveMigrationMapForKVMNotExistingDest() { 
when(primaryDataStoreDao.findById(POOL_2_ID)).thenReturn(null); - kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2); + kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap); } @Test(expected = CloudRuntimeException.class) @@ -497,7 +497,7 @@ public class KvmNonManagedStorageSystemDataMotionTest { when(pool1.getId()).thenReturn(POOL_1_ID); when(pool2.getId()).thenReturn(POOL_2_ID); lenient().when(pool2.isManaged()).thenReturn(false); - kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2); + kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap); } @Test diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java index cea9de3f1b4..45357fa64b2 100644 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java @@ -23,7 +23,6 @@ import static org.junit.Assert.assertFalse; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; import static org.mockito.MockitoAnnotations.initMocks; import java.util.HashMap; @@ -48,7 +47,6 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.verification.VerificationMode; import com.cloud.agent.api.MigrateCommand; import com.cloud.host.HostVO; @@ -62,7 +60,6 @@ import com.cloud.storage.VolumeVO; import java.util.AbstractMap; import java.util.Arrays; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Set; 
@@ -372,72 +369,4 @@ public class StorageSystemDataMotionStrategyTest { assertFalse(strategy.isStoragePoolTypeInList(StoragePoolType.SharedMountPoint, listTypes)); } - - @Test - public void validateAddSourcePoolToPoolsMapDestinationPoolIsManaged() { - Mockito.doReturn(true).when(destinationStoragePoolVoMock).isManaged(); - strategy.addSourcePoolToPoolsMap(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock); - - Mockito.verify(destinationStoragePoolVoMock).isManaged(); - Mockito.verifyNoMoreInteractions(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock); - } - - @Test - public void validateAddSourcePoolToPoolsMapDestinationPoolIsNotNFS() { - List storagePoolTypes = new LinkedList<>(Arrays.asList(StoragePoolType.values())); - storagePoolTypes.remove(StoragePoolType.NetworkFilesystem); - - Mockito.doReturn(false).when(destinationStoragePoolVoMock).isManaged(); - storagePoolTypes.forEach(poolType -> { - Mockito.doReturn(poolType).when(destinationStoragePoolVoMock).getPoolType(); - strategy.addSourcePoolToPoolsMap(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock); - }); - - VerificationMode times = Mockito.times(storagePoolTypes.size()); - Mockito.verify(destinationStoragePoolVoMock, times).isManaged(); - Mockito.verify(destinationStoragePoolVoMock, times).getPoolType(); - Mockito.verifyNoMoreInteractions(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock); - } - - @Test - public void validateAddSourcePoolToPoolsMapMapContainsKey() { - Mockito.doReturn(false).when(destinationStoragePoolVoMock).isManaged(); - Mockito.doReturn(StoragePoolType.NetworkFilesystem).when(destinationStoragePoolVoMock).getPoolType(); - Mockito.doReturn("").when(sourceStoragePoolVoMock).getUuid(); - Mockito.doReturn(true).when(mapStringStoragePoolTypeMock).containsKey(Mockito.anyString()); - strategy.addSourcePoolToPoolsMap(mapStringStoragePoolTypeMock, 
sourceStoragePoolVoMock, destinationStoragePoolVoMock); - - Mockito.verify(destinationStoragePoolVoMock, never()).getScope(); - Mockito.verify(destinationStoragePoolVoMock).isManaged(); - Mockito.verify(destinationStoragePoolVoMock).getPoolType(); - Mockito.verify(sourceStoragePoolVoMock).getUuid(); - Mockito.verify(mapStringStoragePoolTypeMock).containsKey(Mockito.anyString()); - Mockito.verifyNoMoreInteractions(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock); - } - - @Test - public void validateAddSourcePoolToPoolsMapMapDoesNotContainsKey() { - List storagePoolTypes = new LinkedList<>(Arrays.asList(StoragePoolType.values())); - - Mockito.doReturn(false).when(destinationStoragePoolVoMock).isManaged(); - Mockito.doReturn(StoragePoolType.NetworkFilesystem).when(destinationStoragePoolVoMock).getPoolType(); - Mockito.doReturn("").when(sourceStoragePoolVoMock).getUuid(); - Mockito.doReturn(false).when(mapStringStoragePoolTypeMock).containsKey(Mockito.anyString()); - Mockito.doReturn(null).when(mapStringStoragePoolTypeMock).put(Mockito.anyString(), Mockito.any()); - - storagePoolTypes.forEach(poolType -> { - Mockito.doReturn(poolType).when(sourceStoragePoolVoMock).getPoolType(); - strategy.addSourcePoolToPoolsMap(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock); - }); - - VerificationMode times = Mockito.times(storagePoolTypes.size()); - Mockito.verify(destinationStoragePoolVoMock, never()).getScope(); - Mockito.verify(destinationStoragePoolVoMock, times).isManaged(); - Mockito.verify(destinationStoragePoolVoMock, times).getPoolType(); - Mockito.verify(sourceStoragePoolVoMock, times).getUuid(); - Mockito.verify(mapStringStoragePoolTypeMock, times).containsKey(Mockito.anyString()); - Mockito.verify(sourceStoragePoolVoMock, times).getPoolType(); - Mockito.verify(mapStringStoragePoolTypeMock, times).put(Mockito.anyString(), Mockito.any()); - 
Mockito.verifyNoMoreInteractions(mapStringStoragePoolTypeMock, sourceStoragePoolVoMock, destinationStoragePoolVoMock); - } } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index ba783e81586..5109118fb54 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -100,6 +100,9 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { @Override public TemplateInfo getTemplate(long templateId, DataStore store) { VMTemplateVO templ = imageDataDao.findById(templateId); + if (templ == null) { + return null; + } if (store == null && !templ.isDirectDownload()) { TemplateObject tmpl = TemplateObject.getTemplate(templ, null, null); return tmpl; diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index 39d4618f81c..5e21f37f4d5 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -533,11 +533,6 @@ public class TemplateServiceImpl implements TemplateService { logger.info("Skip downloading template " + tmplt.getUniqueName() + " since no url is specified."); continue; } - // if this is private template, skip sync to a new image store - if (isSkipTemplateStoreDownload(tmplt, zoneId)) { - logger.info("Skip sync downloading private template " + tmplt.getUniqueName() + " to a new image store"); - continue; - } // if this is a region store, and there is already an DOWNLOADED entry there without install_path information, which // means 
that this is a duplicate entry from migration of previous NFS to staging. @@ -819,7 +814,7 @@ public class TemplateServiceImpl implements TemplateService { String templateName = dataDiskTemplate.isIso() ? dataDiskTemplate.getPath().substring(dataDiskTemplate.getPath().lastIndexOf(File.separator) + 1) : template.getName() + suffix + diskCount; VMTemplateVO templateVO = new VMTemplateVO(templateId, templateName, format, false, false, false, ttype, template.getUrl(), template.requiresHvm(), template.getBits(), template.getAccountId(), null, templateName, false, guestOsId, false, template.getHypervisorType(), null, - null, false, false, false, false); + null, false, false, false, false, template.getArch()); if (dataDiskTemplate.isIso()){ templateVO.setUniqueName(templateName); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index 11a13e7ccb4..d2f08260aa3 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@ -202,7 +202,7 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager, // No store with space found logger.error(String.format("Can't find an image storage in zone with less than %d usage", - Math.round(_statsCollector.getImageStoreCapacityThreshold()*100))); + Math.round(_statsCollector.getImageStoreCapacityThreshold() * 100))); return null; } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index c5de70b2175..a322705f2f4 100644 --- 
a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -23,6 +23,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.cpu.CPU; import com.cloud.storage.StorageManager; import com.cloud.user.UserData; import org.apache.logging.log4j.Logger; @@ -82,6 +83,10 @@ public class TemplateObject implements TemplateInfo { } protected void configure(VMTemplateVO template, DataStore dataStore) { + if (template == null) { + String msg = String.format("Template Object is not properly initialised %s", this.toString()); + logger.warn(msg); + } imageVO = template; this.dataStore = dataStore; } @@ -98,6 +103,10 @@ public class TemplateObject implements TemplateInfo { } public VMTemplateVO getImage() { + if (imageVO == null) { + String msg = String.format("Template Object is not properly initialised %s", this.toString()); + logger.error(msg); + } // somehow the nullpointer is needed : refacter needed!?! return imageVO; } @@ -342,6 +351,11 @@ public class TemplateObject implements TemplateInfo { return imageVO.getUserDataOverridePolicy(); } + @Override + public CPU.CPUArch getArch() { + return imageVO.getArch(); + } + @Override public DataTO getTO() { DataTO to = null; diff --git a/engine/storage/integration-test/src/test/resources/component.xml b/engine/storage/integration-test/src/test/resources/component.xml index d384d546665..608326d1279 100644 --- a/engine/storage/integration-test/src/test/resources/component.xml +++ b/engine/storage/integration-test/src/test/resources/component.xml @@ -17,13 +17,13 @@ under the License. 
--> - + - + @@ -47,15 +47,15 @@ - - + + - + @@ -74,7 +74,7 @@ - + @@ -89,7 +89,7 @@ - + @@ -138,8 +138,8 @@ - - + + @@ -179,11 +179,11 @@ - + - + diff --git a/engine/storage/integration-test/src/test/resources/s3_testng.xml b/engine/storage/integration-test/src/test/resources/s3_testng.xml index c46e5a2db04..8beb598ee00 100644 --- a/engine/storage/integration-test/src/test/resources/s3_testng.xml +++ b/engine/storage/integration-test/src/test/resources/s3_testng.xml @@ -28,17 +28,17 @@ - + - + - + diff --git a/engine/storage/integration-test/src/test/resources/storageContext.xml b/engine/storage/integration-test/src/test/resources/storageContext.xml index 7c95345f673..f38e718ea41 100644 --- a/engine/storage/integration-test/src/test/resources/storageContext.xml +++ b/engine/storage/integration-test/src/test/resources/storageContext.xml @@ -1,27 +1,27 @@ - - + @@ -36,10 +36,10 @@ - - - - + + + + @@ -56,7 +56,7 @@ - + diff --git a/engine/storage/integration-test/src/test/resources/testng.xml b/engine/storage/integration-test/src/test/resources/testng.xml index fb4330999f3..34ef6defcd9 100644 --- a/engine/storage/integration-test/src/test/resources/testng.xml +++ b/engine/storage/integration-test/src/test/resources/testng.xml @@ -27,10 +27,10 @@ - + - + @@ -40,11 +40,11 @@ - + - + diff --git a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java index 3c525ba9364..f1c27526f52 100644 --- a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java +++ b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java @@ -18,6 +18,7 @@ */ package org.apache.cloudstack.storage.object.store; +import com.cloud.agent.api.to.BucketTO; import com.cloud.agent.api.to.DataStoreTO; import org.apache.cloudstack.storage.object.Bucket; import 
com.cloud.storage.DataStoreRole; @@ -107,38 +108,38 @@ public class ObjectStoreImpl implements ObjectStoreEntity { } @Override - public boolean deleteBucket(String bucketName) { - return driver.deleteBucket(bucketName, objectStoreVO.getId()); + public boolean deleteBucket(BucketTO bucket) { + return driver.deleteBucket(bucket, objectStoreVO.getId()); } @Override - public boolean setBucketEncryption(String bucketName) { - return driver.setBucketEncryption(bucketName, objectStoreVO.getId()); + public boolean setBucketEncryption(BucketTO bucket) { + return driver.setBucketEncryption(bucket, objectStoreVO.getId()); } @Override - public boolean deleteBucketEncryption(String bucketName) { - return driver.deleteBucketEncryption(bucketName, objectStoreVO.getId()); + public boolean deleteBucketEncryption(BucketTO bucket) { + return driver.deleteBucketEncryption(bucket, objectStoreVO.getId()); } @Override - public boolean setBucketVersioning(String bucketName) { - return driver.setBucketVersioning(bucketName, objectStoreVO.getId()); + public boolean setBucketVersioning(BucketTO bucket) { + return driver.setBucketVersioning(bucket, objectStoreVO.getId()); } @Override - public boolean deleteBucketVersioning(String bucketName) { - return driver.deleteBucketVersioning(bucketName, objectStoreVO.getId()); + public boolean deleteBucketVersioning(BucketTO bucket) { + return driver.deleteBucketVersioning(bucket, objectStoreVO.getId()); } @Override - public void setBucketPolicy(String bucketName, String policy) { - driver.setBucketPolicy(bucketName, policy, objectStoreVO.getId()); + public void setBucketPolicy(BucketTO bucket, String policy) { + driver.setBucketPolicy(bucket, policy, objectStoreVO.getId()); } @Override - public void setQuota(String bucketName, int quota) { - driver.setBucketQuota(bucketName, objectStoreVO.getId(), quota); + public void setQuota(BucketTO bucket, int quota) { + driver.setBucketQuota(bucket, objectStoreVO.getId(), quota); } @Override diff --git 
a/engine/storage/object/src/test/resource/testContext.xml b/engine/storage/object/src/test/resource/testContext.xml index 7352b1148f7..c168582bab8 100644 --- a/engine/storage/object/src/test/resource/testContext.xml +++ b/engine/storage/object/src/test/resource/testContext.xml @@ -22,7 +22,7 @@ xmlns:tx="http://www.springframework.org/schema/tx" xmlns:aop="http://www.springframework.org/schema/aop" xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd - http://www.springframework.org/schema/tx + http://www.springframework.org/schema/tx http://www.springframework.org/schema/tx/spring-tx.xsd http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd @@ -38,7 +38,7 @@ - + @@ -49,7 +49,7 @@ - + @@ -58,7 +58,7 @@ - + @@ -68,12 +68,12 @@ - + - + diff --git a/engine/storage/snapshot/pom.xml b/engine/storage/snapshot/pom.xml index ac0daeabf76..f29b43d8de0 100644 --- a/engine/storage/snapshot/pom.xml +++ b/engine/storage/snapshot/pom.xml @@ -56,6 +56,11 @@ ${project.version} compile + + mysql + mysql-connector-java + test + diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java index 7e902bc61fe..afc8be1e5f9 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java @@ -100,6 +100,8 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { @Inject SnapshotZoneDao snapshotZoneDao; + private final List snapshotStatesAbleToDeleteSnapshot = Arrays.asList(Snapshot.State.Destroying, Snapshot.State.Destroyed, Snapshot.State.Error); + public SnapshotDataStoreVO getSnapshotImageStoreRef(long snapshotId, long zoneId) { List 
snaps = snapshotStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image); for (SnapshotDataStoreVO ref : snaps) { @@ -197,9 +199,8 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { boolean result = false; boolean resultIsSet = false; - final List snapshotStatesAbleToDeleteSnapshot = Arrays.asList(Snapshot.State.BackedUp, Snapshot.State.Destroying, Snapshot.State.Destroyed, Snapshot.State.Error); try { - while (snapshot != null && snapshotStatesAbleToDeleteSnapshot.contains(snapshot.getState())) { + do { SnapshotInfo child = snapshot.getChild(); if (child != null) { @@ -245,7 +246,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { } snapshot = parent; - } + } while (snapshot != null && snapshotStatesAbleToDeleteSnapshot.contains(snapshot.getState())); } catch (Exception e) { logger.error(String.format("Failed to delete snapshot [%s] on storage [%s] due to [%s].", snapshotTo, storageToString, e.getMessage()), e); } diff --git a/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-snapshot-core-context.xml b/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-snapshot-core-context.xml index 1d1c831a7b0..cd64cedb7b2 100644 --- a/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-snapshot-core-context.xml +++ b/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-snapshot-core-context.xml @@ -35,5 +35,5 @@ - + diff --git a/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml b/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml index 56c6fff63e0..fc561159c8e 100644 --- a/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml +++ 
b/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml @@ -32,7 +32,7 @@ - + diff --git a/engine/storage/snapshot/src/test/resources/SnapshotManagerTestContext.xml b/engine/storage/snapshot/src/test/resources/SnapshotManagerTestContext.xml index a06ba6a3b9a..b0aa3956f02 100644 --- a/engine/storage/snapshot/src/test/resources/SnapshotManagerTestContext.xml +++ b/engine/storage/snapshot/src/test/resources/SnapshotManagerTestContext.xml @@ -1,19 +1,19 @@ - epIds = hostDao.listSsvmHostsWithPendingMigrateJobsOrderedByJobCount(); if (epIds.isEmpty()) { Collections.shuffle(eps); endPoint = eps.get(0); @@ -533,23 +530,4 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { private Integer getCopyCmdsCountToSpecificSSVM(Long ssvmId) { return _cmdExecLogDao.getCopyCmdCountForSSVM(ssvmId); } - - private List ssvmWithLeastMigrateJobs() { - logger.debug("Picking ssvm from the pool with least commands running on it"); - String query = "select host_id, count(*) from cmd_exec_log group by host_id order by 2;"; - TransactionLegacy txn = TransactionLegacy.currentTxn(); - - List result = new ArrayList(); - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(query); - ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - result.add((long) rs.getInt(1)); - } - } catch (SQLException e) { - logger.debug("SQLException caught", e); - } - return result; - } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java index 2c3d5ccfdde..9eae1fc0711 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java @@ -407,4 +407,16 @@ public class VolumeDataStoreDaoImpl extends 
GenericDaoBase volumeIds, Long batchSize) { + if (CollectionUtils.isEmpty(volumeIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("volumeIds", sb.entity().getVolumeId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("volumeIds", volumeIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/object/ObjectStoreDriver.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/object/ObjectStoreDriver.java index 4953b9b0cdf..13aaf7c002e 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/object/ObjectStoreDriver.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/object/ObjectStoreDriver.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.object; import com.amazonaws.services.s3.model.AccessControlList; import com.amazonaws.services.s3.model.BucketPolicy; +import com.cloud.agent.api.to.BucketTO; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import java.util.List; @@ -30,30 +31,30 @@ public interface ObjectStoreDriver extends DataStoreDriver { List listBuckets(long storeId); - boolean deleteBucket(String bucketName, long storeId); + boolean deleteBucket(BucketTO bucket, long storeId); - AccessControlList getBucketAcl(String bucketName, long storeId); + AccessControlList getBucketAcl(BucketTO bucket, long storeId); - void setBucketAcl(String bucketName, AccessControlList acl, long storeId); + void setBucketAcl(BucketTO bucket, AccessControlList acl, long storeId); - void setBucketPolicy(String bucketName, String policyType, long storeId); + void setBucketPolicy(BucketTO bucket, String policyType, long storeId); - BucketPolicy getBucketPolicy(String bucketName, long storeId); + BucketPolicy getBucketPolicy(BucketTO bucket, long storeId); - void deleteBucketPolicy(String bucketName, long storeId); + void deleteBucketPolicy(BucketTO bucket, long storeId); 
boolean createUser(long accountId, long storeId); - boolean setBucketEncryption(String bucketName, long storeId); + boolean setBucketEncryption(BucketTO bucket, long storeId); - boolean deleteBucketEncryption(String bucketName, long storeId); + boolean deleteBucketEncryption(BucketTO bucket, long storeId); - boolean setBucketVersioning(String bucketName, long storeId); + boolean setBucketVersioning(BucketTO bucket, long storeId); - boolean deleteBucketVersioning(String bucketName, long storeId); + boolean deleteBucketVersioning(BucketTO bucket, long storeId); - void setBucketQuota(String bucketName, long storeId, long size); + void setBucketQuota(BucketTO bucket, long storeId, long size); Map getAllBucketsUsage(long storeId); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index 8044a2dfa5e..e4c26932619 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -32,6 +32,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -43,17 +44,20 @@ import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.exception.InvalidParameterValueException; +import 
com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -266,4 +270,48 @@ public class PrimaryDataStoreHelper { return true; } + public void switchToZone(DataStore store, HypervisorType hypervisorType) { + StoragePoolVO pool = dataStoreDao.findById(store.getId()); + CapacityVO capacity = _capacityDao.findByHostIdType(store.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED); + Transaction.execute(new TransactionCallbackNoReturn() { + public void doInTransactionWithoutResult(TransactionStatus status) { + pool.setScope(ScopeType.ZONE); + pool.setPodId(null); + pool.setClusterId(null); + pool.setHypervisor(hypervisorType); + dataStoreDao.update(pool.getId(), pool); + + capacity.setPodId(null); + capacity.setClusterId(null); + _capacityDao.update(capacity.getId(), capacity); + } + }); + logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to zone"); + } + + public void switchToCluster(DataStore store, ClusterScope clusterScope) { + List hostPoolRecords = storagePoolHostDao.listByPoolIdNotInCluster(clusterScope.getScopeId(), store.getId()).first(); + StoragePoolVO pool = dataStoreDao.findById(store.getId()); + CapacityVO capacity = _capacityDao.findByHostIdType(store.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED); + + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void 
doInTransactionWithoutResult(TransactionStatus status) { + if (hostPoolRecords != null) { + for (StoragePoolHostVO host : hostPoolRecords) { + storagePoolHostDao.deleteStoragePoolHostDetails(host.getHostId(), host.getPoolId()); + } + } + pool.setScope(ScopeType.CLUSTER); + pool.setPodId(clusterScope.getPodId()); + pool.setClusterId(clusterScope.getScopeId()); + dataStoreDao.update(pool.getId(), pool); + + capacity.setPodId(clusterScope.getPodId()); + capacity.setClusterId(clusterScope.getScopeId()); + _capacityDao.update(capacity.getId(), capacity); + } + }); + logger.debug("Scope of storage pool id=" + pool.getId() + " is changed to cluster id=" + clusterScope.getScopeId()); + } } diff --git a/engine/storage/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-core-context.xml b/engine/storage/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-core-context.xml index 5cecb224125..ad13a397172 100644 --- a/engine/storage/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-core-context.xml +++ b/engine/storage/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-core-context.xml @@ -35,7 +35,7 @@ - + - + - + diff --git a/engine/storage/src/main/resources/META-INF/cloudstack/storage-allocator/spring-engine-storage-storage-allocator-context.xml b/engine/storage/src/main/resources/META-INF/cloudstack/storage-allocator/spring-engine-storage-storage-allocator-context.xml index 783c98aacbe..e68d2c67309 100644 --- a/engine/storage/src/main/resources/META-INF/cloudstack/storage-allocator/spring-engine-storage-storage-allocator-context.xml +++ b/engine/storage/src/main/resources/META-INF/cloudstack/storage-allocator/spring-engine-storage-storage-allocator-context.xml @@ -31,7 +31,7 @@ class="org.apache.cloudstack.storage.allocator.LocalStoragePoolAllocator"> - + - + diff --git a/engine/storage/src/test/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImplTest.java 
b/engine/storage/src/test/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImplTest.java new file mode 100644 index 00000000000..0cd88699956 --- /dev/null +++ b/engine/storage/src/test/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImplTest.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.image.db; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class VolumeDataStoreDaoImplTest { + + @Spy + VolumeDataStoreDaoImpl volumeDataStoreDaoImplSpy; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, volumeDataStoreDaoImplSpy.expungeByVolumeList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, volumeDataStoreDaoImplSpy.expungeByVolumeList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(volumeDataStoreDaoImplSpy).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(volumeDataStoreDaoImplSpy.createSearchBuilder()).thenReturn(sb); + final VolumeDataStoreVO mockedVO = Mockito.mock(VolumeDataStoreVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), volumeDataStoreDaoImplSpy.expungeByVolumeList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("volumeIds", array); + Mockito.verify(volumeDataStoreDaoImplSpy, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/engine/storage/src/test/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelperTest.java b/engine/storage/src/test/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelperTest.java new file mode 100644 index 00000000000..3927b43f393 --- /dev/null +++ b/engine/storage/src/test/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelperTest.java @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.volume.datastore; + +import java.util.List; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.Pair; + +@RunWith(MockitoJUnitRunner.class) +public class PrimaryDataStoreHelperTest { + + @Mock + private PrimaryDataStoreDao dataStoreDao; + + @Mock + private CapacityDao capacityDao; + + @Mock + private StoragePoolHostDao storagePoolHostDao; + + @Spy + @InjectMocks + PrimaryDataStoreHelper dataStoreHelper; + + private static final Long ZONE_ID = 1L; + private static final Long CLUSTER_ID = 2L; + private static final Long POD_ID = 3L; + private static final Long POOL_ID = 4L; + private static final Short capacityType = 0; + private static final Float usedPercentage = 0.0f; + + @Test + public void testSwitchToZone() { + StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, ZONE_ID, POD_ID, 0L, 0L, null, 0, null); + pool.setClusterId(CLUSTER_ID); + pool.setScope(ScopeType.CLUSTER); + CapacityVO capacity = new CapacityVO(ZONE_ID, POD_ID, CLUSTER_ID, capacityType, usedPercentage); + + Mockito.when(dataStoreDao.findById(pool.getId())).thenReturn(pool); + 
Mockito.when(capacityDao.findByHostIdType(pool.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED)).thenReturn(capacity); + DataStore storeMock = Mockito.mock(DataStore.class); + Mockito.when(storeMock.getId()).thenReturn(POOL_ID); + + dataStoreHelper.switchToZone(storeMock, HypervisorType.KVM); + + Assert.assertEquals(pool.getScope(), ScopeType.ZONE); + Assert.assertEquals(pool.getPodId(), null); + Assert.assertEquals(pool.getClusterId(), null); + Assert.assertEquals(pool.getHypervisor(), HypervisorType.KVM); + Assert.assertEquals(capacity.getPodId(), null); + Assert.assertEquals(capacity.getClusterId(), null); + } + + @Test + public void testSwitchToCluster() { + StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, ZONE_ID, null, 0L, 0L, null, 0, null); + pool.setScope(ScopeType.ZONE); + CapacityVO capacity = new CapacityVO(ZONE_ID, null, null, capacityType, usedPercentage); + ClusterScope clusterScope = new ClusterScope(CLUSTER_ID, POD_ID, ZONE_ID); + + Pair, Integer> hostPoolRecords = new Pair<>(null, 0); + Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords); + Mockito.when(dataStoreDao.findById(pool.getId())).thenReturn(pool); + Mockito.when(capacityDao.findByHostIdType(pool.getId(), Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED)).thenReturn(capacity); + DataStore storeMock = Mockito.mock(DataStore.class); + Mockito.when(storeMock.getId()).thenReturn(POOL_ID); + + dataStoreHelper.switchToCluster(storeMock, clusterScope); + + Mockito.verify(storagePoolHostDao, Mockito.never()).deleteStoragePoolHostDetails(Mockito.anyLong(), Mockito.anyLong()); + + Assert.assertEquals(pool.getScope(), ScopeType.CLUSTER); + Assert.assertEquals(pool.getPodId(), POD_ID); + Assert.assertEquals(pool.getClusterId(), CLUSTER_ID); + Assert.assertEquals(capacity.getPodId(), POD_ID); + Assert.assertEquals(capacity.getClusterId(), CLUSTER_ID); + } +} diff --git 
a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java new file mode 100644 index 00000000000..1ee4d40a567 --- /dev/null +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImpl.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.datastore.lifecycle; + +import java.util.Arrays; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.Pair; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +public class BasePrimaryDataStoreLifeCycleImpl { + protected Logger logger = LogManager.getLogger(getClass()); + + @Inject + AgentManager agentMgr; + @Inject + protected ResourceManager resourceMgr; + @Inject + StorageManager storageMgr; + @Inject + PrimaryDataStoreHelper dataStoreHelper; + @Inject + protected HostDao hostDao; + @Inject + protected StoragePoolHostDao storagePoolHostDao; + + private List getPoolHostsList(ClusterScope clusterScope, HypervisorType hypervisorType) { + List hosts; + if (hypervisorType != null) { + hosts = resourceMgr + .listAllHostsInOneZoneNotInClusterByHypervisor(hypervisorType, clusterScope.getZoneId(), clusterScope.getScopeId()); + } else { + List hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware); + hosts = resourceMgr + .listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, clusterScope.getZoneId(), clusterScope.getScopeId()); + } + return hosts; + } + + public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, HypervisorType 
hypervisorType) { + List hosts = getPoolHostsList(clusterScope, hypervisorType); + logger.debug("Changing scope of the storage pool to Zone"); + if (hosts != null) { + for (HostVO host : hosts) { + try { + storageMgr.connectHostToSharedPool(host.getId(), store.getId()); + } catch (Exception e) { + logger.warn("Unable to establish a connection between " + host + " and " + store, e); + } + } + } + dataStoreHelper.switchToZone(store, hypervisorType); + } + + public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, HypervisorType hypervisorType) { + Pair, Integer> hostPoolRecords = storagePoolHostDao.listByPoolIdNotInCluster(clusterScope.getScopeId(), store.getId()); + logger.debug("Changing scope of the storage pool to Cluster"); + if (hostPoolRecords.second() > 0) { + StoragePool pool = (StoragePool) store; + for (StoragePoolHostVO host : hostPoolRecords.first()) { + DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(pool); + final Answer answer = agentMgr.easySend(host.getHostId(), deleteCmd); + + if (answer != null) { + if (!answer.getResult()) { + logger.debug("Failed to delete storage pool: " + answer.getResult()); + } else if (HypervisorType.KVM != hypervisorType) { + break; + } + } + } + } + dataStoreHelper.switchToCluster(store, clusterScope); + } +} diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index b8f90e46538..c6d9fab5f17 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -42,7 +42,9 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StorageService; import 
com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; + import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -54,7 +56,9 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import javax.inject.Inject; + import java.util.List; +import java.util.Map; public class DefaultHostListener implements HypervisorHostListener { protected Logger logger = LogManager.getLogger(getClass()); @@ -126,7 +130,9 @@ public class DefaultHostListener implements HypervisorHostListener { @Override public boolean hostConnect(long hostId, long poolId) throws StorageConflictException { StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); + Pair, Boolean> nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null); + + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first()); cmd.setWait(modifyStoragePoolCommandWait); logger.debug(String.format("Sending modify storage pool command to agent: %d for storage pool: %d with timeout %d seconds", hostId, poolId, cmd.getWait())); @@ -139,7 +145,7 @@ public class DefaultHostListener implements HypervisorHostListener { if (!answer.getResult()) { String msg = "Unable to attach storage pool" + poolId + " to the host" + hostId; alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new CloudRuntimeException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + + throw new CloudRuntimeException("Unable to establish connection from storage head to storage pool " + pool.getId() + " due to " + 
answer.getDetails() + pool.getId()); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 1b3bec0e907..825a8cbd941 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -935,6 +935,11 @@ public class VolumeObject implements VolumeInfo { volumeVO.setEncryptFormat(encryptFormat); } + @Override + public boolean isDeleteProtection() { + return volumeVO.isDeleteProtection(); + } + @Override public boolean isFollowRedirects() { return followRedirects; diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 24a0db0b74a..9a3319f79a3 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -32,7 +32,6 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; -import com.cloud.storage.VolumeApiServiceImpl; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; @@ -105,6 +104,7 @@ import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.dao.ClusterDao; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageAccessException; import com.cloud.host.Host; @@ -118,6 +118,7 @@ import com.cloud.org.Grouping.AllocationState; import 
com.cloud.resource.ResourceState; import com.cloud.server.ManagementService; import com.cloud.storage.CheckAndRepairVolumePayload; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.DataStoreRole; import com.cloud.storage.RegisterVolumePayload; import com.cloud.storage.ScopeType; @@ -130,6 +131,7 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeApiServiceImpl; import com.cloud.storage.Volume.State; import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; @@ -215,7 +217,7 @@ public class VolumeServiceImpl implements VolumeService { @Inject private PassphraseDao passphraseDao; @Inject - private DiskOfferingDao diskOfferingDao; + protected DiskOfferingDao diskOfferingDao; public VolumeServiceImpl() { } @@ -290,12 +292,12 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture createVolumeAsync(VolumeInfo volume, DataStore dataStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); DataObject volumeOnStore = dataStore.create(volume); volumeOnStore.processEvent(Event.CreateOnlyRequested); try { - CreateVolumeContext context = new CreateVolumeContext(null, volumeOnStore, future); + CreateVolumeContext context = new CreateVolumeContext<>(null, volumeOnStore, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createVolumeCallback(null, null)).setContext(context); @@ -371,7 +373,7 @@ public class VolumeServiceImpl implements VolumeService { @DB @Override public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult result = new VolumeApiResult(volume); if (volume.getDataStore() == null) { 
logger.info("Expunge volume with no data store specified"); @@ -427,7 +429,7 @@ public class VolumeServiceImpl implements VolumeService { volume.processEvent(Event.ExpungeRequested); } - DeleteVolumeContext context = new DeleteVolumeContext(null, vo, future); + DeleteVolumeContext context = new DeleteVolumeContext<>(null, vo, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)).setContext(context); @@ -503,7 +505,9 @@ public class VolumeServiceImpl implements VolumeService { _snapshotStoreDao.remove(snapStoreVo.getId()); } } else { - _snapshotStoreDao.remove(snapStoreVo.getId()); + if (!StoragePoolType.StorPool.equals(storagePoolVO.getPoolType())) { + _snapshotStoreDao.remove(snapStoreVo.getId()); + } } } snapshotApiService.markVolumeSnapshotsAsDestroyed(vo); @@ -636,7 +640,7 @@ public class VolumeServiceImpl implements VolumeService { } } long templatePoolRefId = templatePoolRef.getId(); - CreateBaseImageContext context = new CreateBaseImageContext(null, volume, dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId); + CreateBaseImageContext context = new CreateBaseImageContext<>(null, volume, dataStore, template, future, templateOnPrimaryStoreObj, templatePoolRefId); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyBaseImageCallback(null, null)).setContext(context); @@ -806,7 +810,7 @@ public class VolumeServiceImpl implements VolumeService { DataObject volumeOnPrimaryStorage = pd.create(volume, volume.getDeployAsIsConfiguration()); volumeOnPrimaryStorage.processEvent(Event.CreateOnlyRequested); - CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, volumeOnPrimaryStorage, pd, templateOnPrimaryStore, future, null, volume.getDeployAsIsConfiguration()); + CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext<>(null, 
volumeOnPrimaryStorage, pd, templateOnPrimaryStore, future, null, volume.getDeployAsIsConfiguration()); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallBack(null, null)); caller.setContext(context); @@ -1174,7 +1178,7 @@ public class VolumeServiceImpl implements VolumeService { // Refresh the volume info from the DB. volumeInfo = volFactory.getVolume(volumeInfo.getId(), destPrimaryDataStore); - Map details = new HashMap(); + Map details = new HashMap<>(); details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); @@ -1278,12 +1282,12 @@ public class VolumeServiceImpl implements VolumeService { // Refresh the volume info from the DB. volumeInfo = volFactory.getVolume(volumeInfo.getId(), primaryDataStore); - ManagedCreateBaseImageContext context = new ManagedCreateBaseImageContext(null, volumeInfo, primaryDataStore, srcTemplateInfo, future); + ManagedCreateBaseImageContext context = new ManagedCreateBaseImageContext<>(null, volumeInfo, primaryDataStore, srcTemplateInfo, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().managedCopyBaseImageCallback(null, null)).setContext(context); - Map details = new HashMap(); + Map details = new HashMap<>(); details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); details.put(PrimaryDataStore.STORAGE_HOST, primaryDataStore.getHostAddress()); @@ -1639,14 +1643,14 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture createVolumeFromSnapshot(VolumeInfo volume, DataStore store, SnapshotInfo snapshot) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); try { DataObject volumeOnStore = store.create(volume); 
volumeOnStore.processEvent(Event.CreateOnlyRequested); _volumeDetailsDao.addDetail(volume.getId(), SNAPSHOT_ID, Long.toString(snapshot.getId()), false); - CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, volume, store, volumeOnStore, future, snapshot, null); + CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext<>(null, volume, store, volumeOnStore, future, snapshot, null); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createVolumeFromSnapshotCallback(null, null)).setContext(context); motionSrv.copyAsync(snapshot, volumeOnStore, caller); @@ -1733,7 +1737,7 @@ public class VolumeServiceImpl implements VolumeService { } protected AsyncCallFuture copyVolumeFromImageToPrimary(VolumeInfo srcVolume, DataStore destStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new VolumeApiResult(srcVolume); VolumeInfo destVolume = null; try { @@ -1741,7 +1745,7 @@ public class VolumeServiceImpl implements VolumeService { destVolume.processEvent(Event.CopyingRequested); srcVolume.processEvent(Event.CopyingRequested); - CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, destVolume, destStore); + CopyVolumeContext context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyVolumeFromImageToPrimaryCallback(null, null)).setContext(context); @@ -1787,7 +1791,7 @@ public class VolumeServiceImpl implements VolumeService { } protected AsyncCallFuture copyVolumeFromPrimaryToImage(VolumeInfo srcVolume, DataStore destStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new VolumeApiResult(srcVolume); VolumeInfo destVolume = null; try { @@ -1795,7 +1799,7 
@@ public class VolumeServiceImpl implements VolumeService { srcVolume.processEvent(Event.MigrationRequested); // this is just used for locking that src volume record in DB to avoid using lock destVolume.processEventOnly(Event.CreateOnlyRequested); - CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, destVolume, destStore); + CopyVolumeContext context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyVolumeFromPrimaryToImageCallback(null, null)).setContext(context); @@ -1868,7 +1872,7 @@ public class VolumeServiceImpl implements VolumeService { // OfflineVmwareMigration: aren't we missing secondary to secondary in this logic? - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { @@ -1884,7 +1888,7 @@ public class VolumeServiceImpl implements VolumeService { destVolume.processEvent(Event.MigrationCopyRequested); srcVolume.processEvent(Event.MigrationRequested); - CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, destVolume, destStore); + CopyVolumeContext context = new CopyVolumeContext<>(null, future, srcVolume, destVolume, destStore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcVolume, destVolume, caller); @@ -2018,7 +2022,7 @@ public class VolumeServiceImpl implements VolumeService { } private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, DataStore destStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { @@ 
-2035,7 +2039,7 @@ public class VolumeServiceImpl implements VolumeService { return future; } - List poolIds = new ArrayList(); + List poolIds = new ArrayList<>(); poolIds.add(srcVolume.getPoolId()); poolIds.add(destStore.getId()); @@ -2067,7 +2071,7 @@ public class VolumeServiceImpl implements VolumeService { PrimaryDataStore srcPrimaryDataStore = (PrimaryDataStore) srcVolume.getDataStore(); if (srcPrimaryDataStore.isManaged()) { - Map srcPrimaryDataStoreDetails = new HashMap(); + Map srcPrimaryDataStoreDetails = new HashMap<>(); srcPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); srcPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_HOST, srcPrimaryDataStore.getHostAddress()); srcPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(srcPrimaryDataStore.getPort())); @@ -2080,7 +2084,7 @@ public class VolumeServiceImpl implements VolumeService { } PrimaryDataStore destPrimaryDataStore = (PrimaryDataStore) destStore; - Map destPrimaryDataStoreDetails = new HashMap(); + Map destPrimaryDataStoreDetails = new HashMap<>(); destPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); destPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); destPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); @@ -2095,7 +2099,7 @@ public class VolumeServiceImpl implements VolumeService { destVolume.processEvent(Event.CreateRequested); srcVolume.processEvent(Event.MigrationRequested); - CopyManagedVolumeContext context = new CopyManagedVolumeContext(null, future, srcVolume, destVolume, hostWithPoolsAccess); + CopyManagedVolumeContext context = new CopyManagedVolumeContext<>(null, future, srcVolume, destVolume, hostWithPoolsAccess); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyManagedVolumeCallBack(null, null)).setContext(context); @@ -2233,7 
+2237,7 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture migrateVolume(VolumeInfo srcVolume, DataStore destStore) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult res = new VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { @@ -2245,7 +2249,7 @@ public class VolumeServiceImpl implements VolumeService { VolumeInfo destVolume = volFactory.getVolume(srcVolume.getId(), destStore); srcVolume.processEvent(Event.MigrationRequested); - MigrateVolumeContext context = new MigrateVolumeContext(null, future, srcVolume, destVolume, destStore); + MigrateVolumeContext context = new MigrateVolumeContext<>(null, future, srcVolume, destVolume, destStore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().migrateVolumeCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcVolume, destVolume, caller); @@ -2298,13 +2302,13 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture migrateVolumes(Map volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); CommandResult res = new CommandResult(); try { // Check to make sure there are no snapshot operations on a volume // and // put it in the migrating state. 
- List volumesMigrating = new ArrayList(); + List volumesMigrating = new ArrayList<>(); for (Map.Entry entry : volumeMap.entrySet()) { VolumeInfo volume = entry.getKey(); if (!snapshotMgr.canOperateOnVolume(volume)) { @@ -2324,7 +2328,7 @@ public class VolumeServiceImpl implements VolumeService { } } - MigrateVmWithVolumesContext context = new MigrateVmWithVolumesContext(null, future, volumeMap); + MigrateVmWithVolumesContext context = new MigrateVmWithVolumesContext<>(null, future, volumeMap); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().migrateVmWithVolumesCallBack(null, null)).setContext(context); motionSrv.copyAsync(volumeMap, vmTo, srcHost, destHost, caller); @@ -2371,13 +2375,13 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture registerVolume(VolumeInfo volume, DataStore store) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); DataObject volumeOnStore = store.create(volume); volumeOnStore.processEvent(Event.CreateOnlyRequested); try { - CreateVolumeContext context = new CreateVolumeContext(null, volumeOnStore, future); + CreateVolumeContext context = new CreateVolumeContext<>(null, volumeOnStore, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().registerVolumeCallback(null, null)); caller.setContext(context); @@ -2472,7 +2476,7 @@ public class VolumeServiceImpl implements VolumeService { @Override public AsyncCallFuture resize(VolumeInfo volume) { - AsyncCallFuture future = new AsyncCallFuture(); + AsyncCallFuture future = new AsyncCallFuture<>(); VolumeApiResult result = new VolumeApiResult(volume); try { volume.processEvent(Event.ResizeRequested); @@ -2482,7 +2486,7 @@ public class VolumeServiceImpl implements VolumeService { future.complete(result); return future; } - CreateVolumeContext context = new CreateVolumeContext(null, 
volume, future); + CreateVolumeContext context = new CreateVolumeContext<>(null, volume, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().resizeVolumeCallback(caller, context)).setContext(context); @@ -2581,7 +2585,7 @@ public class VolumeServiceImpl implements VolumeService { // find all the db volumes including those with NULL url column to avoid accidentally deleting volumes on image store later. List dbVolumes = _volumeStoreDao.listByStoreId(storeId); - List toBeDownloaded = new ArrayList(dbVolumes); + List toBeDownloaded = new ArrayList<>(dbVolumes); for (VolumeDataStoreVO volumeStore : dbVolumes) { VolumeVO volume = volDao.findById(volumeStore.getVolumeId()); if (volume == null) { @@ -2797,6 +2801,16 @@ public class VolumeServiceImpl implements VolumeService { } } + @Override + public void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId) { + DiskOfferingVO existingDiskOffering = diskOfferingDao.findByIdIncludingRemoved(existingDiskOfferingId); + DiskOfferingVO newDiskOffering = diskOfferingDao.findById(newDiskOfferingId); + + if (existingDiskOffering.getEncrypt() != newDiskOffering.getEncrypt()) { + throw new InvalidParameterValueException("Cannot change the encryption type of a volume, please check the selected offering"); + } + } + @Override public Pair checkAndRepairVolume(VolumeInfo volume) { Long poolId = volume.getPoolId(); diff --git a/engine/storage/volume/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-volume-core-context.xml b/engine/storage/volume/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-volume-core-context.xml index a0cb6a78e91..57cb7f191ff 100644 --- a/engine/storage/volume/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-volume-core-context.xml +++ b/engine/storage/volume/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-volume-core-context.xml @@ -45,5 +45,5 
@@ - + diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java new file mode 100644 index 00000000000..355eb075129 --- /dev/null +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/BasePrimaryDataStoreLifeCycleImplTest.java @@ -0,0 +1,127 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.datastore.lifecycle; + +import static org.mockito.ArgumentMatchers.eq; + +import java.util.Arrays; +import java.util.List; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.storage.datastore.PrimaryDataStoreImpl; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.Pair; + +@RunWith(MockitoJUnitRunner.class) +public class BasePrimaryDataStoreLifeCycleImplTest { + + @Mock + private StoragePoolHostDao storagePoolHostDao; + + @Mock + private PrimaryDataStoreHelper dataStoreHelper; + + @Mock + private AgentManager agentManager; + + @Mock + private ResourceManager resourceManager; + + @Mock + private StorageManager storageManager; + + @Spy + @InjectMocks + private BasePrimaryDataStoreLifeCycleImpl dataStoreLifeCycle; + + private static final Long POOL_ID = 1L; + private static final Long CLUSTER_ID = 2L; + private static final Long POD_ID = 3L; + private static final Long ZONE_ID = 4L; + private static final Long HOST_ID = 5L; + + private static ClusterScope clusterScope; + private static PrimaryDataStoreImpl store; + + + @BeforeClass + 
public static void init() { + clusterScope = new ClusterScope(CLUSTER_ID, POD_ID, ZONE_ID); + StoragePoolVO pool = new StoragePoolVO(POOL_ID, null, null, Storage.StoragePoolType.NetworkFilesystem, 0L, 0L, 0L, 0L, null, 0, null); + store = new PrimaryDataStoreImpl(); + store.configure(pool, null, null); + } + + @Test + public void testChangeStoragePoolScopeToZone() throws Exception { + Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType.KVM, ZONE_ID, CLUSTER_ID)).thenReturn(null); + + dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, HypervisorType.KVM); + + Mockito.verify(dataStoreHelper, Mockito.times(1)).switchToZone(store, HypervisorType.KVM); + + HostVO host = new HostVO(null); + ReflectionTestUtils.setField(host, "id", HOST_ID); + List hypervisorTypes = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware); + Mockito.when(resourceManager.listAllHostsInOneZoneNotInClusterByHypervisors(hypervisorTypes, ZONE_ID, CLUSTER_ID)).thenReturn(Arrays.asList(host)); + Mockito.when(storageManager.connectHostToSharedPool(HOST_ID, POOL_ID)).thenReturn(true); + + dataStoreLifeCycle.changeStoragePoolScopeToZone(store, clusterScope, null); + + Mockito.verify(dataStoreHelper, Mockito.times(1)).switchToZone(store, null); + } + + @Test + public void testChangeStoragePoolScopeToCluster() { + Pair, Integer> hostPoolRecords = new Pair<>(null, 0); + Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords); + Mockito.doNothing().when(dataStoreHelper).switchToCluster(store, clusterScope); + + dataStoreLifeCycle.changeStoragePoolScopeToCluster(store, clusterScope, HypervisorType.KVM); + + hostPoolRecords.set(Arrays.asList(new StoragePoolHostVO(POOL_ID, HOST_ID, null)), 1); + Answer answer = new Answer(null, false, null); + Mockito.when(storagePoolHostDao.listByPoolIdNotInCluster(CLUSTER_ID, POOL_ID)).thenReturn(hostPoolRecords); + Mockito.when(agentManager.easySend(eq(HOST_ID), 
Mockito.any(DeleteStoragePoolCommand.class))).thenReturn(answer); + + dataStoreLifeCycle.changeStoragePoolScopeToCluster(store, clusterScope, HypervisorType.KVM); + + Mockito.verify(dataStoreHelper, Mockito.times(2)).switchToCluster(store, clusterScope); + } +} diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java index 3a7fcfb6338..c4241dfbc3a 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java @@ -19,6 +19,23 @@ package org.apache.cloudstack.storage.volume; +import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; +import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.CheckAndRepairVolumePayload; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.utils.Pair; import java.util.ArrayList; import java.util.Arrays; @@ -39,21 +56,6 @@ import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; -import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; -import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; -import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.exception.StorageUnavailableException; -import com.cloud.host.HostVO; -import 
com.cloud.host.dao.HostDao; -import com.cloud.storage.CheckAndRepairVolumePayload; -import com.cloud.storage.Storage; -import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePool; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.snapshot.SnapshotManager; -import com.cloud.utils.Pair; - import junit.framework.TestCase; @RunWith(MockitoJUnitRunner.class) @@ -92,6 +94,9 @@ public class VolumeServiceTest extends TestCase{ @Mock HostDao hostDaoMock; + @Mock + DiskOfferingDao diskOfferingDaoMock; + @Before public void setup(){ volumeServiceImplSpy = Mockito.spy(new VolumeServiceImpl()); @@ -100,6 +105,7 @@ public class VolumeServiceTest extends TestCase{ volumeServiceImplSpy.snapshotMgr = snapshotManagerMock; volumeServiceImplSpy._storageMgr = storageManagerMock; volumeServiceImplSpy._hostDao = hostDaoMock; + volumeServiceImplSpy.diskOfferingDao = diskOfferingDaoMock; } @Test(expected = InterruptedException.class) @@ -309,4 +315,40 @@ public class VolumeServiceTest extends TestCase{ Assert.assertEquals(null, result); } + + @Test + public void validateDiskOfferingCheckForEncryption1Test() { + prepareOfferingsForEncryptionValidation(1L, true); + prepareOfferingsForEncryptionValidation(2L, true); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test + public void validateDiskOfferingCheckForEncryption2Test() { + prepareOfferingsForEncryptionValidation(1L, false); + prepareOfferingsForEncryptionValidation(2L, false); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test (expected = InvalidParameterValueException.class) + public void validateDiskOfferingCheckForEncryptionFail1Test() { + prepareOfferingsForEncryptionValidation(1L, false); + prepareOfferingsForEncryptionValidation(2L, true); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + @Test (expected = InvalidParameterValueException.class) + public void 
validateDiskOfferingCheckForEncryptionFail2Test() { + prepareOfferingsForEncryptionValidation(1L, true); + prepareOfferingsForEncryptionValidation(2L, false); + volumeServiceImplSpy.validateChangeDiskOfferingEncryptionType(1L, 2L); + } + + private void prepareOfferingsForEncryptionValidation(long diskOfferingId, boolean encryption) { + DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); + + Mockito.when(diskOffering.getEncrypt()).thenReturn(encryption); + Mockito.when(diskOfferingDaoMock.findByIdIncludingRemoved(diskOfferingId)).thenReturn(diskOffering); + Mockito.when(diskOfferingDaoMock.findById(diskOfferingId)).thenReturn(diskOffering); + } } diff --git a/engine/storage/volume/src/test/resource/testContext.xml b/engine/storage/volume/src/test/resource/testContext.xml index 7352b1148f7..c168582bab8 100644 --- a/engine/storage/volume/src/test/resource/testContext.xml +++ b/engine/storage/volume/src/test/resource/testContext.xml @@ -22,7 +22,7 @@ xmlns:tx="http://www.springframework.org/schema/tx" xmlns:aop="http://www.springframework.org/schema/aop" xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd - http://www.springframework.org/schema/tx + http://www.springframework.org/schema/tx http://www.springframework.org/schema/tx/spring-tx.xsd http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd @@ -38,7 +38,7 @@ - + @@ -49,7 +49,7 @@ - + @@ -58,7 +58,7 @@ - + @@ -68,12 +68,12 @@ - + - + diff --git a/engine/userdata/cloud-init/src/main/java/org/apache/cloudstack/userdata/CloudInitUserDataProvider.java b/engine/userdata/cloud-init/src/main/java/org/apache/cloudstack/userdata/CloudInitUserDataProvider.java index 6e1086c631e..02e6adcc784 100644 --- a/engine/userdata/cloud-init/src/main/java/org/apache/cloudstack/userdata/CloudInitUserDataProvider.java +++ 
b/engine/userdata/cloud-init/src/main/java/org/apache/cloudstack/userdata/CloudInitUserDataProvider.java @@ -85,14 +85,20 @@ public class CloudInitUserDataProvider extends AdapterBase implements UserDataPr .filter(x -> (x.startsWith("#") && !x.startsWith("##")) || (x.startsWith("Content-Type:"))) .collect(Collectors.toList()); if (CollectionUtils.isEmpty(lines)) { - throw new CloudRuntimeException("Failed to detect the user data format type as it " + - "does not contain a header"); + logger.debug("Failed to detect the user data format type as it does not contain a header"); + return null; } return lines.get(0); } - protected FormatType mapUserDataHeaderToFormatType(String header) { - if (header.equalsIgnoreCase("#cloud-config")) { + protected FormatType mapUserDataHeaderToFormatType(String header, FormatType defaultFormatType) { + if (StringUtils.isBlank(header)) { + if (defaultFormatType == null) { + throw new CloudRuntimeException("Failed to detect the user data format type as it does not contain a header"); + } + logger.debug(String.format("Empty header for userdata, using the default format type: %s", defaultFormatType.name())); + return defaultFormatType; + } else if (header.equalsIgnoreCase("#cloud-config")) { return FormatType.CLOUD_CONFIG; } else if (header.startsWith("#!")) { return FormatType.BASH_SCRIPT; @@ -112,9 +118,11 @@ public class CloudInitUserDataProvider extends AdapterBase implements UserDataPr /** * Detect the user data type + * @param userdata the userdata string to detect the type + * @param defaultFormatType if not null, then use it in case the header does not exist in the userdata, otherwise fail * Reference: */ - protected FormatType getUserDataFormatType(String userdata) { + protected FormatType getUserDataFormatType(String userdata, FormatType defaultFormatType) { if (StringUtils.isBlank(userdata)) { String msg = "User data expected but provided empty user data"; logger.error(msg); @@ -122,7 +130,7 @@ public class 
CloudInitUserDataProvider extends AdapterBase implements UserDataPr } String header = extractUserDataHeader(userdata); - return mapUserDataHeaderToFormatType(header); + return mapUserDataHeaderToFormatType(header, defaultFormatType); } private String getContentType(String userData, FormatType formatType) throws MessagingException { @@ -231,7 +239,9 @@ public class CloudInitUserDataProvider extends AdapterBase implements UserDataPr } private String simpleAppendSameFormatTypeUserData(String userData1, String userData2) { - return String.format("%s\n\n%s", userData1, userData2.substring(userData2.indexOf('\n')+1)); + String userdata2Header = extractUserDataHeader(userData2); + int beginIndex = StringUtils.isNotBlank(userdata2Header) ? userData2.indexOf('\n')+1 : 0; + return String.format("%s\n\n%s", userData1, userData2.substring(beginIndex)); } private void checkGzipAppend(String encodedUserData1, String encodedUserData2) { @@ -246,8 +256,8 @@ public class CloudInitUserDataProvider extends AdapterBase implements UserDataPr checkGzipAppend(encodedUserData1, encodedUserData2); String userData1 = new String(Base64.decodeBase64(encodedUserData1)); String userData2 = new String(Base64.decodeBase64(encodedUserData2)); - FormatType formatType1 = getUserDataFormatType(userData1); - FormatType formatType2 = getUserDataFormatType(userData2); + FormatType formatType1 = getUserDataFormatType(userData1, null); + FormatType formatType2 = getUserDataFormatType(userData2, formatType1); if (formatType1.equals(formatType2) && List.of(FormatType.CLOUD_CONFIG, FormatType.BASH_SCRIPT).contains(formatType1)) { return simpleAppendSameFormatTypeUserData(userData1, userData2); } diff --git a/engine/userdata/cloud-init/src/test/java/org/apache/cloudstack/userdata/CloudInitUserDataProviderTest.java b/engine/userdata/cloud-init/src/test/java/org/apache/cloudstack/userdata/CloudInitUserDataProviderTest.java index 4ca9fb7ebd6..86b6a6fb6ea 100644 --- 
a/engine/userdata/cloud-init/src/test/java/org/apache/cloudstack/userdata/CloudInitUserDataProviderTest.java +++ b/engine/userdata/cloud-init/src/test/java/org/apache/cloudstack/userdata/CloudInitUserDataProviderTest.java @@ -73,21 +73,28 @@ public class CloudInitUserDataProviderTest { @Test public void testGetUserDataFormatType() { - CloudInitUserDataProvider.FormatType type = provider.getUserDataFormatType(CLOUD_CONFIG_USERDATA); + CloudInitUserDataProvider.FormatType type = provider.getUserDataFormatType(CLOUD_CONFIG_USERDATA, null); Assert.assertEquals(CloudInitUserDataProvider.FormatType.CLOUD_CONFIG, type); } @Test(expected = CloudRuntimeException.class) public void testGetUserDataFormatTypeNoHeader() { String userdata = "password: password\nchpasswd: { expire: False }\nssh_pwauth: True"; - provider.getUserDataFormatType(userdata); + provider.getUserDataFormatType(userdata, null); + } + + @Test + public void testGetUserDataFormatTypeNoHeaderDefaultFormat() { + String userdata = "password: password\nchpasswd: { expire: False }\nssh_pwauth: True"; + CloudInitUserDataProvider.FormatType defaultFormatType = CloudInitUserDataProvider.FormatType.CLOUD_CONFIG; + Assert.assertEquals(defaultFormatType, provider.getUserDataFormatType(userdata, defaultFormatType)); } @Test(expected = CloudRuntimeException.class) public void testGetUserDataFormatTypeInvalidType() { String userdata = "#invalid-type\n" + "password: password\nchpasswd: { expire: False }\nssh_pwauth: True"; - provider.getUserDataFormatType(userdata); + provider.getUserDataFormatType(userdata, null); } private MimeMultipart getCheckedMultipartFromMultipartData(String multipartUserData, int count) { @@ -111,6 +118,16 @@ public class CloudInitUserDataProviderTest { getCheckedMultipartFromMultipartData(multipartUserData, 2); } + @Test + public void testAppendUserDataSecondWithoutHeader() { + String userdataWithHeader = Base64.encodeBase64String(SHELL_SCRIPT_USERDATA1.getBytes()); + String bashScriptWithoutHeader 
= "echo \"without header\""; + String userdataWithoutHeader = Base64.encodeBase64String(bashScriptWithoutHeader.getBytes()); + String appended = provider.appendUserData(userdataWithHeader, userdataWithoutHeader); + String expected = String.format("%s\n\n%s", SHELL_SCRIPT_USERDATA1, bashScriptWithoutHeader); + Assert.assertEquals(expected, appended); + } + @Test public void testAppendSameShellScriptTypeUserData() { String result = SHELL_SCRIPT_USERDATA + "\n\n" + @@ -129,6 +146,22 @@ public class CloudInitUserDataProviderTest { Assert.assertEquals(result, appendUserData); } + @Test + public void testAppendCloudConfig() { + String userdata1 = "#cloud-config\n" + + "chpasswd:\n" + + " list: |\n" + + " root:password\n" + + " expire: False"; + String userdata2 = "write_files:\n" + + "- path: /root/CLOUD_INIT_WAS_HERE"; + String userdataWithHeader = Base64.encodeBase64String(userdata1.getBytes()); + String userdataWithoutHeader = Base64.encodeBase64String(userdata2.getBytes()); + String appended = provider.appendUserData(userdataWithHeader, userdataWithoutHeader); + String expected = String.format("%s\n\n%s", userdata1, userdata2); + Assert.assertEquals(expected, appended); + } + @Test public void testAppendUserDataMIMETemplateData() { String multipartUserData = provider.appendUserData( diff --git a/engine/userdata/src/main/java/org/apache/cloudstack/userdata/UserDataManagerImpl.java b/engine/userdata/src/main/java/org/apache/cloudstack/userdata/UserDataManagerImpl.java index 91f24fe7045..664d308e28d 100644 --- a/engine/userdata/src/main/java/org/apache/cloudstack/userdata/UserDataManagerImpl.java +++ b/engine/userdata/src/main/java/org/apache/cloudstack/userdata/UserDataManagerImpl.java @@ -27,18 +27,15 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang3.StringUtils; -import com.cloud.configuration.ConfigurationManager; import com.cloud.exception.InvalidParameterValueException; import 
com.cloud.utils.component.ManagerBase; import com.cloud.utils.exception.CloudRuntimeException; public class UserDataManagerImpl extends ManagerBase implements UserDataManager { - - private static final int MAX_USER_DATA_LENGTH_BYTES = 2048; - private static final int MAX_HTTP_GET_LENGTH = 2 * MAX_USER_DATA_LENGTH_BYTES; + private static final int MAX_HTTP_GET_LENGTH = 2 * MAX_USER_DATA_LENGTH_BYTES; // 4KB private static final int NUM_OF_2K_BLOCKS = 512; - private static final int MAX_HTTP_POST_LENGTH = NUM_OF_2K_BLOCKS * MAX_USER_DATA_LENGTH_BYTES; + private static final int MAX_HTTP_POST_LENGTH = NUM_OF_2K_BLOCKS * MAX_USER_DATA_LENGTH_BYTES; // 1MB private List userDataProviders; private static Map userDataProvidersMap = new HashMap<>(); @@ -67,7 +64,7 @@ public class UserDataManagerImpl extends ManagerBase implements UserDataManager @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {}; + return new ConfigKey[] {VM_USERDATA_MAX_LENGTH}; } protected UserDataProvider getUserdataProvider(String name) { @@ -90,49 +87,57 @@ public class UserDataManagerImpl extends ManagerBase implements UserDataManager @Override public String validateUserData(String userData, BaseCmd.HTTPMethod httpmethod) { - byte[] decodedUserData = null; - if (userData != null) { - - if (userData.contains("%")) { - try { - userData = URLDecoder.decode(userData, "UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new InvalidParameterValueException("Url decoding of userdata failed."); - } - } - - if (!Base64.isBase64(userData)) { - throw new InvalidParameterValueException("User data is not base64 encoded"); - } - // If GET, use 4K. If POST, support up to 1M. 
- if (httpmethod.equals(BaseCmd.HTTPMethod.GET)) { - decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_GET_LENGTH, BaseCmd.HTTPMethod.GET); - } else if (httpmethod.equals(BaseCmd.HTTPMethod.POST)) { - decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_POST_LENGTH, BaseCmd.HTTPMethod.POST); - } - - if (decodedUserData == null || decodedUserData.length < 1) { - throw new InvalidParameterValueException("User data is too short"); - } - // Re-encode so that the '=' paddings are added if necessary since 'isBase64' does not require it, but python does on the VR. - return Base64.encodeBase64String(decodedUserData); + logger.trace(String.format("Validating base64 encoded user data: [%s].", userData)); + if (StringUtils.isBlank(userData)) { + logger.debug("Null/empty base64 encoded user data set"); + return null; } - return null; + + if (userData.contains("%")) { + try { + userData = URLDecoder.decode(userData, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new InvalidParameterValueException("Url decoding of user data failed."); + } + } + + if (!Base64.isBase64(userData)) { + throw new InvalidParameterValueException("User data is not base64 encoded."); + } + + byte[] decodedUserData = null; + + // If GET, use 4K. If POST, support up to 1M. + if (httpmethod.equals(BaseCmd.HTTPMethod.GET)) { + decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_GET_LENGTH, BaseCmd.HTTPMethod.GET); + } else if (httpmethod.equals(BaseCmd.HTTPMethod.POST)) { + decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_POST_LENGTH, BaseCmd.HTTPMethod.POST); + } + + // Re-encode so that the '=' paddings are added if necessary since 'isBase64' does not require it, but python does on the VR. 
+ return Base64.encodeBase64String(decodedUserData); } private byte[] validateAndDecodeByHTTPMethod(String userData, int maxHTTPLength, BaseCmd.HTTPMethod httpMethod) { - byte[] decodedUserData = null; + byte[] decodedUserData = Base64.decodeBase64(userData.getBytes()); + if (decodedUserData == null || decodedUserData.length < 1) { + throw new InvalidParameterValueException("User data is too short."); + } - if (userData.length() >= maxHTTPLength) { - throw new InvalidParameterValueException(String.format("User data is too long for an http %s request", httpMethod.toString())); - } - if (userData.length() > ConfigurationManager.VM_USERDATA_MAX_LENGTH.value()) { - throw new InvalidParameterValueException("User data has exceeded configurable max length : " + ConfigurationManager.VM_USERDATA_MAX_LENGTH.value()); - } - decodedUserData = Base64.decodeBase64(userData.getBytes()); - if (decodedUserData.length > maxHTTPLength) { + logger.trace(String.format("Decoded user data: [%s].", decodedUserData)); + int userDataLength = userData.length(); + int decodedUserDataLength = decodedUserData.length; + logger.info(String.format("Configured base64 encoded user data size: %d bytes, actual user data size: %d bytes", userDataLength, decodedUserDataLength)); + + if (userDataLength > maxHTTPLength) { + logger.warn(String.format("Base64 encoded user data (size: %d bytes) too long for http %s request (accepted size: %d bytes)", userDataLength, httpMethod.toString(), maxHTTPLength)); throw new InvalidParameterValueException(String.format("User data is too long for http %s request", httpMethod.toString())); } + if (userDataLength > VM_USERDATA_MAX_LENGTH.value()) { + logger.warn(String.format("Base64 encoded user data (size: %d bytes) has exceeded configurable max length of %d bytes", userDataLength, VM_USERDATA_MAX_LENGTH.value())); + throw new InvalidParameterValueException("User data has exceeded configurable max length: " + VM_USERDATA_MAX_LENGTH.value()); + } + return 
decodedUserData; } } diff --git a/engine/userdata/src/main/resources/META-INF/cloudstack/core/spring-engine-userdata-core-context.xml b/engine/userdata/src/main/resources/META-INF/cloudstack/core/spring-engine-userdata-core-context.xml index 0d4c6474be1..b3a587ee594 100644 --- a/engine/userdata/src/main/resources/META-INF/cloudstack/core/spring-engine-userdata-core-context.xml +++ b/engine/userdata/src/main/resources/META-INF/cloudstack/core/spring-engine-userdata-core-context.xml @@ -30,5 +30,5 @@ - + diff --git a/framework/ca/src/main/java/org/apache/cloudstack/framework/ca/CAProvider.java b/framework/ca/src/main/java/org/apache/cloudstack/framework/ca/CAProvider.java index 388cae7e007..77b3ee27783 100644 --- a/framework/ca/src/main/java/org/apache/cloudstack/framework/ca/CAProvider.java +++ b/framework/ca/src/main/java/org/apache/cloudstack/framework/ca/CAProvider.java @@ -22,6 +22,7 @@ import java.math.BigInteger; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.KeyStoreException; +import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; import java.util.List; import java.util.Map; @@ -45,6 +46,7 @@ public interface CAProvider { /** * Issues certificate with provided options + * * @param domainNames * @param ipAddresses * @param validityDays @@ -104,4 +106,6 @@ public interface CAProvider { * @return returns description */ String getDescription(); + + boolean isManagementCertificate(java.security.cert.Certificate certificate) throws CertificateParsingException; } diff --git a/framework/ca/src/main/java/org/apache/cloudstack/framework/ca/CAService.java b/framework/ca/src/main/java/org/apache/cloudstack/framework/ca/CAService.java index facf13a5cb6..721c88bee50 100644 --- a/framework/ca/src/main/java/org/apache/cloudstack/framework/ca/CAService.java +++ b/framework/ca/src/main/java/org/apache/cloudstack/framework/ca/CAService.java @@ -21,6 +21,7 @@ import 
java.io.IOException; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.KeyStoreException; +import java.security.cert.CertificateParsingException; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; @@ -47,4 +48,6 @@ public interface CAService { * @return returns char[] passphrase */ char[] getKeyStorePassphrase(); + + boolean isManagementCertificate(java.security.cert.Certificate certificate) throws CertificateParsingException; } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManager.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManager.java index 1b1406c1cec..54f575830e4 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManager.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManager.java @@ -16,8 +16,8 @@ // under the License. package com.cloud.cluster; -import org.apache.cloudstack.management.ManagementServerHost; import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.management.ManagementServerHost; import com.cloud.utils.component.Manager; @@ -77,6 +77,8 @@ public interface ClusterManager extends Manager { */ String getSelfPeerName(); + String getSelfNodeIP(); + long getManagementNodeId(); /** diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java index e4e55eb9348..32fdf782696 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java @@ -40,16 +40,16 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.cluster.dao.ManagementServerStatusDao; -import org.apache.cloudstack.management.ManagementServerHost; import org.apache.cloudstack.framework.config.ConfigDepot; import 
org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.management.ManagementServerHost; import org.apache.cloudstack.utils.identity.ManagementServerNode; import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.cluster.dao.ManagementServerHostPeerDao; +import com.cloud.cluster.dao.ManagementServerStatusDao; import com.cloud.utils.DateUtil; import com.cloud.utils.Profiler; import com.cloud.utils.component.ComponentLifecycle; @@ -128,7 +128,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C // recursive remote calls between nodes // _executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cluster-Worker")); - setRunLevel(ComponentLifecycle.RUN_LEVEL_FRAMEWORK); + setRunLevel(ComponentLifecycle.RUN_LEVEL_COMPONENT); } private void registerRequestPdu(final ClusterServiceRequestPdu pdu) { @@ -294,7 +294,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } } } catch (final Throwable e) { - logger.error("Unexcpeted exception: ", e); + logger.error("Unexpected exception: ", e); } } } @@ -346,7 +346,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } }); } catch (final Throwable e) { - logger.error("Unexcpeted exception: ", e); + logger.error("Unexpected exception: ", e); } } } @@ -384,7 +384,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } executeAsync(peerName, agentId, cmds, true); } catch (final Exception e) { - logger.warn("Caught exception while talkign to " + peer.getMsid()); + logger.warn("Caught exception while talking to " + peer.getMsid()); } } } @@ -473,6 +473,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C return Long.toString(_msId); } + @Override public String getSelfNodeIP() { return _clusterNodeIP; } 
diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceAdapter.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceAdapter.java index 735de5bdac9..e073a28a622 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceAdapter.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceAdapter.java @@ -28,7 +28,5 @@ public interface ClusterServiceAdapter extends Adapter { public ClusterService getPeerService(String strPeer) throws RemoteException; - public String getServiceEndpointName(String strPeer); - public int getServicePort(); } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletAdapter.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletAdapter.java index 937ef4a6249..3e498b1fbec 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletAdapter.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletAdapter.java @@ -23,6 +23,7 @@ import java.util.Properties; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.framework.config.ConfigDepot; import com.cloud.cluster.dao.ManagementServerHostDao; @@ -42,6 +43,8 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster @Inject private ManagementServerHostDao _mshostDao; @Inject + private CAManager caService; + @Inject protected ConfigDepot _configDepot; private ClusterServiceServletContainer _servletContainer; @@ -49,7 +52,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster private int _clusterServicePort = DEFAULT_SERVICE_PORT; public ClusterServiceServletAdapter() { - setRunLevel(ComponentLifecycle.RUN_LEVEL_FRAMEWORK); + setRunLevel(ComponentLifecycle.RUN_LEVEL_COMPONENT); } @Override @@ -64,12 +67,10 @@ public class ClusterServiceServletAdapter extends AdapterBase implements 
Cluster String serviceUrl = getServiceEndpointName(strPeer); if (serviceUrl == null) return null; - - return new ClusterServiceServletImpl(serviceUrl); + return new ClusterServiceServletImpl(serviceUrl, caService); } - @Override - public String getServiceEndpointName(String strPeer) { + protected String getServiceEndpointName(String strPeer) { try { init(); } catch (ConfigurationException e) { @@ -93,7 +94,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster private String composeEndpointName(String nodeIP, int port) { StringBuffer sb = new StringBuffer(); - sb.append("http://").append(nodeIP).append(":").append(port).append("/clusterservice"); + sb.append("https://").append(nodeIP).append(":").append(port).append("/clusterservice"); return sb.toString(); } @@ -106,7 +107,8 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster @Override public boolean start() { _servletContainer = new ClusterServiceServletContainer(); - _servletContainer.start(new ClusterServiceServletHttpHandler(_manager), _clusterServicePort); + _servletContainer.start(new ClusterServiceServletHttpHandler(_manager), _manager.getSelfNodeIP(), + _clusterServicePort, caService); return true; } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletContainer.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletContainer.java index ac468089f47..e8c3de98016 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletContainer.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletContainer.java @@ -17,11 +17,23 @@ package com.cloud.cluster; import java.io.IOException; -import java.net.ServerSocket; +import java.net.InetAddress; import java.net.Socket; +import java.security.GeneralSecurityException; +import java.security.cert.Certificate; +import java.security.cert.CertificateParsingException; import java.util.concurrent.ExecutorService; 
import java.util.concurrent.Executors; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLServerSocket; +import javax.net.ssl.SSLServerSocketFactory; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; + +import org.apache.cloudstack.framework.ca.CAService; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.http.ConnectionClosedException; import org.apache.http.HttpException; import org.apache.http.impl.DefaultConnectionReuseStrategy; @@ -41,12 +53,12 @@ import org.apache.http.protocol.ResponseConnControl; import org.apache.http.protocol.ResponseContent; import org.apache.http.protocol.ResponseDate; import org.apache.http.protocol.ResponseServer; - -import org.apache.cloudstack.managed.context.ManagedContextRunnable; - -import com.cloud.utils.concurrency.NamedThreadFactory; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.cloud.utils.StringUtils; +import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.nio.Link; public class ClusterServiceServletContainer { @@ -55,9 +67,9 @@ public class ClusterServiceServletContainer { public ClusterServiceServletContainer() { } - public boolean start(HttpRequestHandler requestHandler, int port) { + public boolean start(HttpRequestHandler requestHandler, String ip, int port, CAService caService) { - listenerThread = new ListenerThread(requestHandler, port); + listenerThread = new ListenerThread(requestHandler, ip, port, caService); listenerThread.start(); return true; @@ -69,26 +81,46 @@ public class ClusterServiceServletContainer { } } + + protected static SSLServerSocket getSecuredServerSocket(SSLContext sslContext, String ip, int port) + throws IOException { + SSLServerSocketFactory sslFactory = sslContext.getServerSocketFactory(); + SSLServerSocket serverSocket = null; + if (StringUtils.isNotEmpty(ip)) { + 
serverSocket = (SSLServerSocket) sslFactory.createServerSocket(port, 0, + InetAddress.getByName(ip)); + } else { + serverSocket = (SSLServerSocket) sslFactory.createServerSocket(port); + } + serverSocket.setNeedClientAuth(true); + return serverSocket; + } + static class ListenerThread extends Thread { private static Logger LOGGER = LogManager.getLogger(ListenerThread.class); - private HttpService _httpService = null; - private volatile ServerSocket _serverSocket = null; - private HttpParams _params = null; - private ExecutorService _executor; + private HttpService httpService = null; + private volatile SSLServerSocket serverSocket = null; + private HttpParams params = null; + private ExecutorService executor; + private CAService caService = null; - public ListenerThread(HttpRequestHandler requestHandler, int port) { - _executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cluster-Listener")); + public ListenerThread(HttpRequestHandler requestHandler, String ip, int port, + CAService caService) { + this.executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cluster-Listener")); + this.caService = caService; try { - _serverSocket = new ServerSocket(port); - } catch (IOException ioex) { - LOGGER.error("error initializing cluster service servlet container", ioex); + SSLContext sslContext = Link.initManagementSSLContext(caService); + serverSocket = getSecuredServerSocket(sslContext, ip, port); + } catch (IOException | GeneralSecurityException e) { + LOGGER.error("Error initializing cluster service servlet container for secure connection", + e); return; } - _params = new BasicHttpParams(); - _params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 5000) + params = new BasicHttpParams(); + params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 5000) .setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024) .setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false) .setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true) @@ 
-106,35 +138,55 @@ public class ClusterServiceServletContainer { reqistry.register("/clusterservice", requestHandler); // Set up the HTTP service - _httpService = new HttpService(httpproc, new DefaultConnectionReuseStrategy(), new DefaultHttpResponseFactory()); - _httpService.setParams(_params); - _httpService.setHandlerResolver(reqistry); + httpService = new HttpService(httpproc, new DefaultConnectionReuseStrategy(), new DefaultHttpResponseFactory()); + httpService.setParams(params); + httpService.setHandlerResolver(reqistry); } public void stopRunning() { - if (_serverSocket != null) { + if (serverSocket != null) { try { - _serverSocket.close(); + serverSocket.close(); } catch (IOException e) { LOGGER.info("[ignored] error on closing server socket", e); } - _serverSocket = null; + serverSocket = null; } } + protected boolean isValidPeerConnection(Socket socket) throws SSLPeerUnverifiedException, + CertificateParsingException { + SSLSocket sslSocket = (SSLSocket) socket; + SSLSession session = sslSocket.getSession(); + if (session == null || !session.isValid()) { + return false; + } + Certificate[] certs = session.getPeerCertificates(); + if (certs == null || certs.length < 1) { + return false; + } + return caService.isManagementCertificate(certs[0]); + } + @Override public void run() { if (LOGGER.isInfoEnabled()) - LOGGER.info("Cluster service servlet container listening on port " + _serverSocket.getLocalPort()); + LOGGER.info(String.format("Cluster service servlet container listening on host: %s and port %d", + serverSocket.getInetAddress().getHostAddress(), serverSocket.getLocalPort())); - while (_serverSocket != null) { + while (serverSocket != null) { try { // Set up HTTP connection - Socket socket = _serverSocket.accept(); + Socket socket = serverSocket.accept(); final DefaultHttpServerConnection conn = new DefaultHttpServerConnection(); - conn.bind(socket, _params); - - _executor.execute(new ManagedContextRunnable() { + conn.bind(socket, params); + if 
(!isValidPeerConnection(socket)) { + LOGGER.warn(String.format("Failure during validating cluster request from %s", + socket.getInetAddress().getHostAddress())); + conn.shutdown(); + continue; + } + executor.execute(new ManagedContextRunnable() { @Override protected void runInContext() { HttpContext context = new BasicHttpContext(null); @@ -143,7 +195,7 @@ public class ClusterServiceServletContainer { if (LOGGER.isTraceEnabled()) LOGGER.trace("dispatching cluster request from " + conn.getRemoteAddress().toString()); - _httpService.handleRequest(conn, context); + httpService.handleRequest(conn, context); if (LOGGER.isTraceEnabled()) LOGGER.trace("Cluster request from " + conn.getRemoteAddress().toString() + " is processed"); @@ -178,7 +230,7 @@ public class ClusterServiceServletContainer { } } - _executor.shutdown(); + executor.shutdown(); if (LOGGER.isInfoEnabled()) LOGGER.info("Cluster service servlet container shutdown"); } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletImpl.java index b60012dbeef..d582538c31e 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletImpl.java @@ -17,99 +17,144 @@ package com.cloud.cluster; import java.io.IOException; +import java.io.UnsupportedEncodingException; import java.rmi.RemoteException; +import java.security.GeneralSecurityException; +import java.util.ArrayList; +import java.util.List; -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpException; +import javax.net.ssl.SSLContext; + +import org.apache.cloudstack.framework.ca.CAService; import org.apache.commons.httpclient.HttpStatus; -import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager; -import org.apache.commons.httpclient.methods.PostMethod; -import 
org.apache.commons.httpclient.params.HttpClientParams; -import org.apache.logging.log4j.Logger; +import org.apache.http.NameValuePair; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.entity.UrlEncodedFormEntity; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.message.BasicNameValuePair; +import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import com.cloud.utils.HttpUtils; import com.cloud.utils.Profiler; +import com.cloud.utils.nio.Link; +import com.google.gson.Gson; public class ClusterServiceServletImpl implements ClusterService { private static final long serialVersionUID = 4574025200012566153L; protected Logger logger = LogManager.getLogger(getClass()); - private String _serviceUrl; + private String serviceUrl; - protected static HttpClient s_client = null; + private CAService caService; + + private Gson gson = new Gson(); + + protected static CloseableHttpClient s_client = null; + + private void logPostParametersForFailedEncoding(List parameters) { + if (logger.isTraceEnabled()) { + logger.trace(String.format("%s encoding failed for POST parameters: %s", HttpUtils.UTF_8, + gson.toJson(parameters))); + } + } public ClusterServiceServletImpl() { } - public ClusterServiceServletImpl(final String serviceUrl) { - logger.info("Setup cluster service servlet. service url: " + serviceUrl + ", request timeout: " + ClusterServiceAdapter.ClusterMessageTimeOut.value() + - " seconds"); + public ClusterServiceServletImpl(final String serviceUrl, final CAService caService) { + logger.info(String.format("Setup cluster service servlet. 
service url: %s, request timeout: %d seconds", serviceUrl, + ClusterServiceAdapter.ClusterMessageTimeOut.value())); + this.serviceUrl = serviceUrl; + this.caService = caService; + } - _serviceUrl = serviceUrl; + protected List getClusterServicePduPostParameters(final ClusterServicePdu pdu) { + List postParameters = new ArrayList<>(); + postParameters.add(new BasicNameValuePair("method", Integer.toString(RemoteMethodConstants.METHOD_DELIVER_PDU))); + postParameters.add(new BasicNameValuePair("sourcePeer", pdu.getSourcePeer())); + postParameters.add(new BasicNameValuePair("destPeer", pdu.getDestPeer())); + postParameters.add(new BasicNameValuePair("pduSeq", Long.toString(pdu.getSequenceId()))); + postParameters.add(new BasicNameValuePair("pduAckSeq", Long.toString(pdu.getAckSequenceId()))); + postParameters.add(new BasicNameValuePair("agentId", Long.toString(pdu.getAgentId()))); + postParameters.add(new BasicNameValuePair("gsonPackage", pdu.getJsonPackage())); + postParameters.add(new BasicNameValuePair("stopOnError", pdu.isStopOnError() ? "1" : "0")); + postParameters.add(new BasicNameValuePair("pduType", Integer.toString(pdu.getPduType()))); + return postParameters; } @Override public String execute(final ClusterServicePdu pdu) throws RemoteException { - - final HttpClient client = getHttpClient(); - final PostMethod method = new PostMethod(_serviceUrl); - - method.addParameter("method", Integer.toString(RemoteMethodConstants.METHOD_DELIVER_PDU)); - method.addParameter("sourcePeer", pdu.getSourcePeer()); - method.addParameter("destPeer", pdu.getDestPeer()); - method.addParameter("pduSeq", Long.toString(pdu.getSequenceId())); - method.addParameter("pduAckSeq", Long.toString(pdu.getAckSequenceId())); - method.addParameter("agentId", Long.toString(pdu.getAgentId())); - method.addParameter("gsonPackage", pdu.getJsonPackage()); - method.addParameter("stopOnError", pdu.isStopOnError() ? 
"1" : "0"); - method.addParameter("pduType", Integer.toString(pdu.getPduType())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Executing ClusterServicePdu with service URL: %s", serviceUrl)); + } + final CloseableHttpClient client = getHttpClient(); + final HttpPost method = new HttpPost(serviceUrl); + final List postParameters = getClusterServicePduPostParameters(pdu); + try { + method.setEntity(new UrlEncodedFormEntity(postParameters, HttpUtils.UTF_8)); + } catch (UnsupportedEncodingException e) { + logger.error("Failed to encode request POST parameters", e); + logPostParametersForFailedEncoding(postParameters); + throw new RemoteException("Failed to encode request POST parameters", e); + } return executePostMethod(client, method); } + protected List getPingPostParameters(final String callingPeer) { + List postParameters = new ArrayList<>(); + postParameters.add(new BasicNameValuePair("method", Integer.toString(RemoteMethodConstants.METHOD_PING))); + postParameters.add(new BasicNameValuePair("callingPeer", callingPeer)); + return postParameters; + } + @Override public boolean ping(final String callingPeer) throws RemoteException { if (logger.isDebugEnabled()) { - logger.debug("Ping at " + _serviceUrl); + logger.debug("Ping at " + serviceUrl); } - final HttpClient client = getHttpClient(); - final PostMethod method = new PostMethod(_serviceUrl); + final CloseableHttpClient client = getHttpClient(); + final HttpPost method = new HttpPost(serviceUrl); - method.addParameter("method", Integer.toString(RemoteMethodConstants.METHOD_PING)); - method.addParameter("callingPeer", callingPeer); + List postParameters = getPingPostParameters(callingPeer); + try { + method.setEntity(new UrlEncodedFormEntity(postParameters, HttpUtils.UTF_8)); + } catch (UnsupportedEncodingException e) { + logger.error("Failed to encode ping request POST parameters", e); + logPostParametersForFailedEncoding(postParameters); + throw new RemoteException("Failed to encode ping 
request POST parameters", e); + } final String returnVal = executePostMethod(client, method); - if ("true".equalsIgnoreCase(returnVal)) { - return true; - } - return false; + return Boolean.TRUE.toString().equalsIgnoreCase(returnVal); } - private String executePostMethod(final HttpClient client, final PostMethod method) { - int response = 0; + private String executePostMethod(final CloseableHttpClient client, final HttpPost method) { String result = null; try { final Profiler profiler = new Profiler(); profiler.start(); - response = client.executeMethod(method); + CloseableHttpResponse httpResponse = client.execute(method); + int response = httpResponse.getStatusLine().getStatusCode(); if (response == HttpStatus.SC_OK) { - result = method.getResponseBodyAsString(); + result = EntityUtils.toString(httpResponse.getEntity()); profiler.stop(); if (logger.isDebugEnabled()) { - logger.debug("POST " + _serviceUrl + " response :" + result + ", responding time: " + profiler.getDurationInMillis() + " ms"); + logger.debug("POST " + serviceUrl + " response :" + result + ", responding time: " + profiler.getDurationInMillis() + " ms"); } } else { profiler.stop(); - logger.error("Invalid response code : " + response + ", from : " + _serviceUrl + ", method : " + method.getParameter("method") + " responding time: " + + logger.error("Invalid response code : " + response + ", from : " + serviceUrl + ", method : " + method.getParams().getParameter("method") + " responding time: " + profiler.getDurationInMillis()); } - } catch (final HttpException e) { - logger.error("HttpException from : " + _serviceUrl + ", method : " + method.getParameter("method")); - } catch (final IOException e) { - logger.error("IOException from : " + _serviceUrl + ", method : " + method.getParameter("method")); - } catch (final Throwable e) { - logger.error("Exception from : " + _serviceUrl + ", method : " + method.getParameter("method") + ", exception :", e); + } catch (IOException e) { + 
logger.error("Exception from : " + serviceUrl + ", method : " + method.getParams().getParameter("method") + ", exception :", e); } finally { method.releaseConnection(); } @@ -117,20 +162,25 @@ public class ClusterServiceServletImpl implements ClusterService { return result; } - private HttpClient getHttpClient() { - + private CloseableHttpClient getHttpClient() { if (s_client == null) { - final MultiThreadedHttpConnectionManager mgr = new MultiThreadedHttpConnectionManager(); - mgr.getParams().setDefaultMaxConnectionsPerHost(4); + SSLContext sslContext = null; + try { + sslContext = Link.initManagementSSLContext(caService); + } catch (GeneralSecurityException | IOException e) { + throw new RuntimeException(e); + } - // TODO make it configurable - mgr.getParams().setMaxTotalConnections(1000); + int timeout = ClusterServiceAdapter.ClusterMessageTimeOut.value() * 1000; + RequestConfig config = RequestConfig.custom() + .setConnectTimeout(timeout) + .setConnectionRequestTimeout(timeout) + .setSocketTimeout(timeout).build(); - s_client = new HttpClient(mgr); - final HttpClientParams clientParams = new HttpClientParams(); - clientParams.setSoTimeout(ClusterServiceAdapter.ClusterMessageTimeOut.value() * 1000); - - s_client.setParams(clientParams); + s_client = HttpClientBuilder.create() + .setDefaultRequestConfig(config) + .setSSLContext(sslContext) + .build(); } return s_client; } diff --git a/framework/cluster/src/main/resources/META-INF/cloudstack/core/spring-framework-cluster-core-context.xml b/framework/cluster/src/main/resources/META-INF/cloudstack/core/spring-framework-cluster-core-context.xml index 539c9e9164f..ddb40d07626 100644 --- a/framework/cluster/src/main/resources/META-INF/cloudstack/core/spring-framework-cluster-core-context.xml +++ b/framework/cluster/src/main/resources/META-INF/cloudstack/core/spring-framework-cluster-core-context.xml @@ -28,9 +28,9 @@ > - + - + - + diff --git 
a/framework/cluster/src/test/java/com/cloud/cluster/ClusterManagerImplTest.java b/framework/cluster/src/test/java/com/cloud/cluster/ClusterManagerImplTest.java new file mode 100644 index 00000000000..9b1854f7348 --- /dev/null +++ b/framework/cluster/src/test/java/com/cloud/cluster/ClusterManagerImplTest.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.cluster; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class ClusterManagerImplTest { + @InjectMocks + ClusterManagerImpl clusterManager = new ClusterManagerImpl(); + + @Test + public void testGetSelfNodeIP() { + String ip = "1.2.3.4"; + ReflectionTestUtils.setField(clusterManager, "_clusterNodeIP", ip); + Assert.assertEquals(ip, clusterManager.getSelfNodeIP()); + } +} diff --git a/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletAdapterTest.java b/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletAdapterTest.java index 25266106f43..6f4b7d6aa9e 100644 --- a/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletAdapterTest.java +++ b/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletAdapterTest.java @@ -50,7 +50,7 @@ public class ClusterServiceServletAdapterTest { @Test public void testRunLevel() { int runLevel = clusterServiceServletAdapter.getRunLevel(); - assertTrue(runLevel == ComponentLifecycle.RUN_LEVEL_FRAMEWORK); + assertTrue(runLevel == ComponentLifecycle.RUN_LEVEL_COMPONENT); assertTrue(runLevel == clusterManagerImpl.getRunLevel()); } } diff --git a/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletContainerTest.java b/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletContainerTest.java new file mode 100644 index 00000000000..baf4e5841bd --- /dev/null +++ b/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletContainerTest.java @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.cluster; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.List; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLContextSpi; +import javax.net.ssl.SSLServerSocket; +import javax.net.ssl.SSLServerSocketFactory; + +import org.apache.commons.collections.CollectionUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.utils.StringUtils; + +@RunWith(MockitoJUnitRunner.class) +public class ClusterServiceServletContainerTest { + + private void runGetSecuredServerSocket(String ip) { + SSLContext sslContext = Mockito.mock(SSLContext.class); + SSLContextSpi sslContextSpi = Mockito.mock(SSLContextSpi.class); + ReflectionTestUtils.setField(sslContext, "contextSpi", sslContextSpi); + SSLServerSocketFactory factory = Mockito.mock(SSLServerSocketFactory.class); + Mockito.when(sslContext.getServerSocketFactory()).thenReturn(factory); + int port = 9090; + final List socketNeedClientAuth = new ArrayList<>(); + try { + SSLServerSocket socketMock = Mockito.mock(SSLServerSocket.class); + if (StringUtils.isBlank(ip)) { + Mockito.when(factory.createServerSocket(port)).thenReturn(socketMock); + } 
else { + Mockito.when(factory.createServerSocket(Mockito.anyInt(), Mockito.anyInt(), + Mockito.any(InetAddress.class))).thenReturn(socketMock); + } + Mockito.doAnswer((Answer) invocationOnMock -> { + boolean needClientAuth = (boolean) invocationOnMock.getArguments()[0]; + socketNeedClientAuth.add(needClientAuth); + return null; + }).when(socketMock).setNeedClientAuth(Mockito.anyBoolean()); + SSLServerSocket socket = ClusterServiceServletContainer.getSecuredServerSocket(sslContext, ip, 9090); + if (StringUtils.isBlank(ip)) { + Mockito.verify(factory, Mockito.times(1)).createServerSocket(port); + } else { + Mockito.verify(factory, Mockito.times(1)).createServerSocket(port, 0, InetAddress.getByName(ip)); + } + Mockito.verify(socket, Mockito.times(1)).setNeedClientAuth(Mockito.anyBoolean()); + Assert.assertTrue(CollectionUtils.isNotEmpty(socketNeedClientAuth)); + Assert.assertTrue(socketNeedClientAuth.get(0)); + } catch (IOException e) { + Assert.fail("Exception occurred: " + e.getMessage()); + } + } + + @Test + public void testGetSecuredServerSocketNoIp() { + runGetSecuredServerSocket(""); + } + + @Test + public void testGetSecuredServerSocketIp() { + runGetSecuredServerSocket("1.2.3.4"); + } +} diff --git a/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletImplTest.java b/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletImplTest.java new file mode 100644 index 00000000000..361c77dbeff --- /dev/null +++ b/framework/cluster/src/test/java/com/cloud/cluster/ClusterServiceServletImplTest.java @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.cluster; + +import java.util.List; +import java.util.Optional; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.http.NameValuePair; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class ClusterServiceServletImplTest { + + @InjectMocks + ClusterServiceServletImpl clusterServiceServlet = new ClusterServiceServletImpl(); + + @Test + public void testClusterServicePduPostParameters() { + List parameters = + clusterServiceServlet.getClusterServicePduPostParameters(Mockito.mock(ClusterServicePdu.class)); + Assert.assertTrue(CollectionUtils.isNotEmpty(parameters)); + Optional opt = parameters.stream().filter(x -> x.getName().equals("method")).findFirst(); + Assert.assertTrue(opt.isPresent()); + NameValuePair val = opt.get(); + Assert.assertEquals(Integer.toString(RemoteMethodConstants.METHOD_DELIVER_PDU), val.getValue()); + } + + @Test + public void testPingPostParameters() { + String peer = "1.2.3.4"; + List parameters = + clusterServiceServlet.getPingPostParameters(peer); + Assert.assertTrue(CollectionUtils.isNotEmpty(parameters)); + Optional opt = parameters.stream().filter(x -> x.getName().equals("method")).findFirst(); + Assert.assertTrue(opt.isPresent()); + NameValuePair val = opt.get(); + Assert.assertEquals(Integer.toString(RemoteMethodConstants.METHOD_PING), val.getValue()); + opt = parameters.stream().filter(x 
-> x.getName().equals("callingPeer")).findFirst(); + Assert.assertTrue(opt.isPresent()); + val = opt.get(); + Assert.assertEquals(peer, val.getValue()); + } +} diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigDepot.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigDepot.java index b38b30e88b8..5ee5f9dec48 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigDepot.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigDepot.java @@ -32,4 +32,6 @@ public interface ConfigDepot { void createOrUpdateConfigObject(String componentName, ConfigKey key, String value); boolean isNewConfig(ConfigKey configKey); + String getConfigStringValue(String key, ConfigKey.Scope scope, Long scopeId); + void invalidateConfigCache(String key, ConfigKey.Scope scope, Long scopeId); } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java index 46923de3c7c..36a8050754c 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.framework.config; import java.sql.Date; import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; -import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; @@ -41,7 +40,7 @@ public class ConfigKey { } public enum Kind { - CSV, Order, Select + CSV, Order, Select, WhitespaceSeparatedListWithOptions } private final String _category; @@ -136,6 +135,10 @@ public class ConfigKey { this(type, name, category, defaultValue, description, isDynamic, Scope.Global, null); } + public ConfigKey(String category, Class type, String name, String defaultValue, String 
description, boolean isDynamic, Kind kind, String options) { + this(type, name, category, defaultValue, description, isDynamic, Scope.Global, null, null, null, null, null, kind, options); + } + public ConfigKey(String category, Class type, String name, String defaultValue, String description, boolean isDynamic, String parent) { this(type, name, category, defaultValue, description, isDynamic, Scope.Global, null, null, parent, null, null, null, null); } @@ -211,42 +214,38 @@ public class ConfigKey { public T value() { if (_value == null || isDynamic()) { - ConfigurationVO vo = (s_depot != null && s_depot.global() != null) ? s_depot.global().findById(key()) : null; - final String value = (vo != null && vo.getValue() != null) ? vo.getValue() : defaultValue(); - _value = ((value == null) ? (T)defaultValue() : valueOf(value)); + String value = s_depot != null ? s_depot.getConfigStringValue(_name, Scope.Global, null) : null; + _value = valueOf((value == null) ? defaultValue() : value); } return _value; } - public T valueIn(Long id) { + protected T valueInScope(Scope scope, Long id) { if (id == null) { return value(); } - String value = s_depot != null ? s_depot.findScopedConfigStorage(this).getConfigValue(id, this) : null; + String value = s_depot != null ? s_depot.getConfigStringValue(_name, scope, id) : null; if (value == null) { return value(); - } else { - return valueOf(value); } + return valueOf(value); + } + + public T valueIn(Long id) { + return valueInScope(_scope, id); } public T valueInDomain(Long domainId) { - if (domainId == null) { - return value(); - } - - String value = s_depot != null ? 
s_depot.getDomainScope(this).getConfigValue(domainId, this) : null; - if (value == null) { - return value(); - } else { - return valueOf(value); - } + return valueInScope(Scope.Domain, domainId); } @SuppressWarnings("unchecked") protected T valueOf(String value) { + if (value == null) { + return null; + } Number multiplier = 1; if (multiplier() != null) { multiplier = (Number)multiplier(); diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ScopedConfigStorage.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ScopedConfigStorage.java index f990278b45c..8126b9510a2 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ScopedConfigStorage.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ScopedConfigStorage.java @@ -26,5 +26,9 @@ import org.apache.cloudstack.framework.config.ConfigKey.Scope; public interface ScopedConfigStorage { Scope getScope(); - String getConfigValue(long id, ConfigKey key); + String getConfigValue(long id, String key); + + default String getConfigValue(long id, ConfigKey key) { + return getConfigValue(id, key.key()); + } } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java index 6884043cae2..b47370d9205 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java @@ -23,6 +23,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.TimeUnit; import javax.annotation.PostConstruct; import javax.inject.Inject; @@ -37,12 +38,14 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao; import 
org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; /** * ConfigDepotImpl implements the ConfigDepot and ConfigDepotAdmin interface. @@ -73,6 +76,7 @@ import com.cloud.utils.exception.CloudRuntimeException; */ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { protected Logger logger = LogManager.getLogger(getClass()); + protected final static long CONFIG_CACHE_EXPIRE_SECONDS = 30; @Inject ConfigurationDao _configDao; @Inject @@ -83,12 +87,17 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { List _scopedStorages; Set _configured = Collections.synchronizedSet(new HashSet()); Set newConfigs = Collections.synchronizedSet(new HashSet<>()); + Cache configCache; private HashMap>> _allKeys = new HashMap>>(1007); HashMap>> _scopeLevelConfigsMap = new HashMap>>(); public ConfigDepotImpl() { + configCache = Caffeine.newBuilder() + .maximumSize(512) + .expireAfterWrite(CONFIG_CACHE_EXPIRE_SECONDS, TimeUnit.SECONDS) + .build(); ConfigKey.init(this); createEmptyScopeLevelMappings(); } @@ -268,6 +277,48 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { return _configDao; } + protected String getConfigStringValueInternal(String cacheKey) { + String[] parts = cacheKey.split("-"); + String key = parts[0]; + ConfigKey.Scope scope = ConfigKey.Scope.Global; + Long scopeId = null; + try { + scope = ConfigKey.Scope.valueOf(parts[1]); + scopeId = Long.valueOf(parts[2]); + } catch (IllegalArgumentException ignored) {} + if (!ConfigKey.Scope.Global.equals(scope) && scopeId != 
null) { + ScopedConfigStorage scopedConfigStorage = null; + for (ScopedConfigStorage storage : _scopedStorages) { + if (storage.getScope() == scope) { + scopedConfigStorage = storage; + } + } + if (scopedConfigStorage == null) { + throw new CloudRuntimeException("Unable to find config storage for this scope: " + scope + " for " + key); + } + return scopedConfigStorage.getConfigValue(scopeId, key); + } + ConfigurationVO configurationVO = _configDao.findById(key); + if (configurationVO != null) { + return configurationVO.getValue(); + } + return null; + } + + private String getConfigCacheKey(String key, ConfigKey.Scope scope, Long scopeId) { + return String.format("%s-%s-%d", key, scope, (scopeId == null ? 0 : scopeId)); + } + + @Override + public String getConfigStringValue(String key, ConfigKey.Scope scope, Long scopeId) { + return configCache.get(getConfigCacheKey(key, scope, scopeId), this::getConfigStringValueInternal); + } + + @Override + public void invalidateConfigCache(String key, ConfigKey.Scope scope, Long scopeId) { + configCache.invalidate(getConfigCacheKey(key, scope, scopeId)); + } + public ScopedConfigStorage findScopedConfigStorage(ConfigKey config) { for (ScopedConfigStorage storage : _scopedStorages) { if (storage.getScope() == config.scope()) { diff --git a/framework/config/src/main/resources/META-INF/cloudstack/system/spring-framework-config-system-context-inheritable.xml b/framework/config/src/main/resources/META-INF/cloudstack/system/spring-framework-config-system-context-inheritable.xml index e176ec5a88b..0148fa7acc6 100644 --- a/framework/config/src/main/resources/META-INF/cloudstack/system/spring-framework-config-system-context-inheritable.xml +++ b/framework/config/src/main/resources/META-INF/cloudstack/system/spring-framework-config-system-context-inheritable.xml @@ -25,13 +25,13 @@ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd http://www.springframework.org/schema/context 
http://www.springframework.org/schema/context/spring-context.xsd" - > + > - + diff --git a/framework/config/src/test/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImplTest.java b/framework/config/src/test/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImplTest.java index 8dd6f71af3c..8a7da795345 100644 --- a/framework/config/src/test/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImplTest.java +++ b/framework/config/src/test/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImplTest.java @@ -23,12 +23,23 @@ import java.util.HashSet; import java.util.Set; import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.junit.Assert; import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; +@RunWith(MockitoJUnitRunner.class) public class ConfigDepotImplTest { + @Mock + ConfigurationDao _configDao; + + @InjectMocks private ConfigDepotImpl configDepotImpl = new ConfigDepotImpl(); @Test @@ -57,4 +68,43 @@ public class ConfigDepotImplTest { Assert.assertFalse(configDepotImpl.isNewConfig(invalidNewConfig)); } + private void runTestGetConfigStringValue(String key, String value) { + ConfigurationVO configurationVO = Mockito.mock(ConfigurationVO.class); + Mockito.when(configurationVO.getValue()).thenReturn(value); + Mockito.when(_configDao.findById(key)).thenReturn(configurationVO); + String result = configDepotImpl.getConfigStringValue(key, ConfigKey.Scope.Global, null); + Assert.assertEquals(value, result); + } + + @Test + public void testGetConfigStringValue() { + runTestGetConfigStringValue("test", "value"); + } + + private void runTestGetConfigStringValueExpiry(long wait, int configDBRetrieval) { + String key = "test1"; + String value = "expiry"; + 
runTestGetConfigStringValue(key, value); + try { + Thread.sleep(wait); + } catch (InterruptedException ie) { + Assert.fail(ie.getMessage()); + } + String result = configDepotImpl.getConfigStringValue(key, ConfigKey.Scope.Global, null); + Assert.assertEquals(value, result); + Mockito.verify(_configDao, Mockito.times(configDBRetrieval)).findById(key); + + } + + @Test + public void testGetConfigStringValueWithinExpiry() { + runTestGetConfigStringValueExpiry((ConfigDepotImpl.CONFIG_CACHE_EXPIRE_SECONDS * 1000 ) / 4, + 1); + } + + @Test + public void testGetConfigStringValueAfterExpiry() { + runTestGetConfigStringValueExpiry(((ConfigDepotImpl.CONFIG_CACHE_EXPIRE_SECONDS) + 5) * 1000, + 2); + } } diff --git a/framework/db/pom.xml b/framework/db/pom.xml index d4d3b6b8772..586d72f34f3 100644 --- a/framework/db/pom.xml +++ b/framework/db/pom.xml @@ -40,6 +40,10 @@ org.apache.commons commons-dbcp2 + + com.zaxxer + HikariCP + commons-io commons-io @@ -48,6 +52,10 @@ org.apache.commons commons-pool2 + + mysql + mysql-connector-java + org.apache.cloudstack cloud-utils diff --git a/framework/db/src/main/java/com/cloud/utils/db/ConnectionConcierge.java b/framework/db/src/main/java/com/cloud/utils/db/ConnectionConcierge.java index 7cf34e6955c..c7c869ba920 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/ConnectionConcierge.java +++ b/framework/db/src/main/java/com/cloud/utils/db/ConnectionConcierge.java @@ -145,7 +145,7 @@ public class ConnectionConcierge { protected String testValidity(String name, Connection conn) { if (conn != null) { synchronized (conn) { - try (PreparedStatement pstmt = conn.prepareStatement("SELECT 1");) { + try (PreparedStatement pstmt = conn.prepareStatement("/* ping */ SELECT 1");) { pstmt.executeQuery(); } catch (Throwable th) { logger.error("Unable to keep the db connection for " + name, th); diff --git a/framework/db/src/main/java/com/cloud/utils/db/Filter.java b/framework/db/src/main/java/com/cloud/utils/db/Filter.java index 
15161ab058f..fb8c9ee37fc 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/Filter.java +++ b/framework/db/src/main/java/com/cloud/utils/db/Filter.java @@ -22,6 +22,7 @@ import javax.persistence.Column; import com.cloud.utils.Pair; import com.cloud.utils.ReflectUtil; +import org.apache.commons.lang3.StringUtils; /** * Try to use static initialization to help you in finding incorrect @@ -59,6 +60,11 @@ public class Filter { _orderBy = " ORDER BY RAND() LIMIT " + limit; } + public Filter(Long offset, Long limit) { + _offset = offset; + _limit = limit; + } + /** * Note that this copy constructor does not copy offset and limit. * @param that filter @@ -70,6 +76,10 @@ public class Filter { } public void addOrderBy(Class clazz, String field, boolean ascending) { + addOrderBy(clazz, field, ascending, null); + } + + public void addOrderBy(Class clazz, String field, boolean ascending, String tableAlias) { if (field == null) { return; } @@ -83,7 +93,9 @@ public class Filter { String name = column != null ? 
column.name() : field; StringBuilder order = new StringBuilder(); - if (column == null || column.table() == null || column.table().length() == 0) { + if (StringUtils.isNotBlank(tableAlias)) { + order.append(tableAlias); + } else if (column == null || column.table() == null || column.table().length() == 0) { order.append(DbUtil.getTableName(clazz)); } else { order.append(column.table()); diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java index 2fc02301cb7..84750c2068c 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java @@ -229,6 +229,32 @@ public interface GenericDao { */ int expunge(final SearchCriteria sc); + /** + * remove the entity bean specified by the search criteria and filter + * @param sc + * @param filter + * @return number of rows deleted + */ + int expunge(final SearchCriteria sc, final Filter filter); + + /** + * remove the entity bean specified by the search criteria and batchSize + * @param sc + * @param batchSize + * @return number of rows deleted + */ + int batchExpunge(final SearchCriteria sc, final Long batchSize); + + int expungeList(List ids); + + /** + * Delete the entity beans specified by the search criteria with a given limit + * @param sc Search criteria + * @param limit Maximum number of rows that will be affected + * @return Number of rows deleted + */ + int expunge(SearchCriteria sc, long limit); + /** * expunge the removed rows. 
*/ diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index a09f323905e..52a6b204ee8 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@ -20,6 +20,8 @@ import java.io.Serializable; import java.io.UnsupportedEncodingException; import java.lang.reflect.Array; import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.math.BigInteger; @@ -59,9 +61,12 @@ import javax.persistence.Enumerated; import javax.persistence.Table; import javax.persistence.TableGenerator; -import com.amazonaws.util.CollectionUtils; import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.exception.ExceptionUtils; +import com.amazonaws.util.CollectionUtils; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; @@ -74,8 +79,6 @@ import com.cloud.utils.db.SearchCriteria.SelectType; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.exception.ExceptionUtils; import net.sf.cglib.proxy.Callback; import net.sf.cglib.proxy.CallbackFilter; @@ -434,9 +437,11 @@ public abstract class GenericDaoBase extends Compone } return result; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to find on DB, due to: " + e.getLocalizedMessage()); } catch (final Exception e) { - throw new CloudRuntimeException("Caught: " + pstmt, e); + 
logger.error("Caught: " + pstmt, e); + throw new CloudRuntimeException("Caught error: " + e.getLocalizedMessage()); } } @@ -519,9 +524,11 @@ public abstract class GenericDaoBase extends Compone return results; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to find on DB, due to: " + e.getLocalizedMessage()); } catch (final Exception e) { - throw new CloudRuntimeException("Caught: " + pstmt, e); + logger.error("Caught: " + pstmt, e); + throw new CloudRuntimeException("Caught error: " + e.getLocalizedMessage()); } } @@ -867,8 +874,9 @@ public abstract class GenericDaoBase extends Compone ub.clear(); return result; } catch (final SQLException e) { + logger.error("DB Exception on: " + pstmt, e); handleEntityExistsException(e); - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to update on DB, due to: " + e.getLocalizedMessage()); } } @@ -1062,7 +1070,8 @@ public abstract class GenericDaoBase extends Compone ResultSet rs = pstmt.executeQuery(); return rs.next() ? 
toEntityBean(rs, true) : null; } catch (SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to find by id on DB, due to: " + e.getLocalizedMessage()); } } @@ -1177,9 +1186,11 @@ public abstract class GenericDaoBase extends Compone } return result; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to execute on DB, due to: " + e.getLocalizedMessage()); } catch (final Exception e) { - throw new CloudRuntimeException("Caught: " + pstmt, e); + logger.error("Caught: " + pstmt, e); + throw new CloudRuntimeException("Caught error: " + e.getLocalizedMessage()); } } @@ -1228,13 +1239,20 @@ public abstract class GenericDaoBase extends Compone } return true; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to expunge on DB, due to: " + e.getLocalizedMessage()); } } // FIXME: Does not work for joins. 
@Override - public int expunge(final SearchCriteria sc) { + public int expunge(final SearchCriteria sc, long limit) { + Filter filter = new Filter(limit); + return expunge(sc, filter); + } + + @Override + public int expunge(final SearchCriteria sc, final Filter filter) { if (sc == null) { throw new CloudRuntimeException("Call to throw new expunge with null search Criteria"); } @@ -1246,6 +1264,7 @@ public abstract class GenericDaoBase extends Compone if (sc != null && sc.getWhereClause().length() > 0) { str.append(sc.getWhereClause()); } + addFilter(str, filter); final String sql = str.toString(); @@ -1259,11 +1278,54 @@ public abstract class GenericDaoBase extends Compone } return pstmt.executeUpdate(); } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to expunge on DB, due to: " + e.getLocalizedMessage()); } catch (final Exception e) { - throw new CloudRuntimeException("Caught: " + pstmt, e); + logger.error("Caught: " + pstmt, e); + throw new CloudRuntimeException("Caught error: " + e.getLocalizedMessage()); } } + @Override + public int expunge(final SearchCriteria sc) { + return expunge(sc, null); + } + + @Override + public int batchExpunge(final SearchCriteria sc, final Long batchSize) { + Filter filter = null; + final long batchSizeFinal = ObjectUtils.defaultIfNull(batchSize, 0L); + if (batchSizeFinal > 0) { + filter = new Filter(batchSizeFinal); + } + int expunged = 0; + int currentExpunged = 0; + do { + currentExpunged = expunge(sc, filter); + expunged += currentExpunged; + } while (batchSizeFinal > 0 && currentExpunged >= batchSizeFinal); + return expunged; + } + + @Override + public int expungeList(final List ids) { + if (org.apache.commons.collections.CollectionUtils.isEmpty(ids)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + Object obj = null; + try { + Method m = 
sb.entity().getClass().getMethod("getId"); + obj = m.invoke(sb.entity()); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) {} + if (obj == null) { + logger.warn(String.format("Unable to get ID object for entity: %s", _entityBeanType.getSimpleName())); + return 0; + } + sb.and("id", obj, SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("id", ids.toArray()); + return expunge(sc); + } @DB() protected StringBuilder createPartialSelectSql(SearchCriteria sc, final boolean whereClause, final boolean enableQueryCache) { @@ -1499,7 +1561,7 @@ public abstract class GenericDaoBase extends Compone return entity; } - assert false : "Can't call persit if you don't have primary key"; + assert false : "Can't call persist if you don't have primary key"; } ID id = null; @@ -1555,8 +1617,9 @@ public abstract class GenericDaoBase extends Compone } txn.commit(); } catch (final SQLException e) { + logger.error("DB Exception on: " + pstmt, e); handleEntityExistsException(e); - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to persist on DB, due to: " + e.getLocalizedMessage()); } catch (IllegalArgumentException e) { throw new CloudRuntimeException("Problem with getting the ec attribute ", e); } catch (IllegalAccessException e) { @@ -1905,7 +1968,8 @@ public abstract class GenericDaoBase extends Compone pstmt.executeUpdate(); txn.commit(); } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to expunge on DB, due to: " + e.getLocalizedMessage()); } } @@ -1933,7 +1997,8 @@ public abstract class GenericDaoBase extends Compone } return result > 0; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new 
CloudRuntimeException("Unable to unremove on DB, due to: " + e.getLocalizedMessage()); } } @@ -1976,7 +2041,8 @@ public abstract class GenericDaoBase extends Compone } return result > 0; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to remove on DB, due to: " + e.getLocalizedMessage()); } } @@ -2124,9 +2190,11 @@ public abstract class GenericDaoBase extends Compone } return 0; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to get count on DB, due to: " + e.getLocalizedMessage()); } catch (final Exception e) { - throw new CloudRuntimeException("Caught: " + pstmt, e); + logger.error("Caught: " + pstmt, e); + throw new CloudRuntimeException("Caught error: " + e.getLocalizedMessage()); } } @@ -2183,9 +2251,11 @@ public abstract class GenericDaoBase extends Compone } return 0; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception in executing: " + sql, e); + logger.error("DB Exception in executing: " + sql, e); + throw new CloudRuntimeException("Unable to get count on DB, due to: " + e.getLocalizedMessage()); } catch (final Exception e) { - throw new CloudRuntimeException("Caught exception in : " + sql, e); + logger.error("Caught exception in : " + sql, e); + throw new CloudRuntimeException("Caught error: " + e.getLocalizedMessage()); } } @@ -2258,9 +2328,11 @@ public abstract class GenericDaoBase extends Compone } return 0; } catch (final SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + pstmt, e); + logger.error("DB Exception on: " + pstmt, e); + throw new CloudRuntimeException("Unable to get count on DB, due to: " + e.getLocalizedMessage()); } catch (final Exception e) { - throw new CloudRuntimeException("Caught: " + pstmt, e); + 
logger.error("Caught: " + pstmt, e); + throw new CloudRuntimeException("Caught error: " + e.getLocalizedMessage()); } } diff --git a/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java b/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java index 00fa8e4c0d5..88af397c06a 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java +++ b/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java @@ -38,17 +38,20 @@ import org.apache.commons.dbcp2.DriverManagerConnectionFactory; import org.apache.commons.dbcp2.PoolableConnection; import org.apache.commons.dbcp2.PoolableConnectionFactory; import org.apache.commons.dbcp2.PoolingDataSource; +import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.pool2.ObjectPool; import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.commons.pool2.impl.GenericObjectPoolConfig; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.utils.Pair; import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.mgmt.JmxUtil; +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; /** * Transaction abstracts away the Connection object in JDBC. 
It allows the @@ -95,6 +98,8 @@ public class TransactionLegacy implements Closeable { } } + private static final String CONNECTION_POOL_LIB_DBCP = "dbcp"; + private final LinkedList _stack; private long _id; @@ -1020,6 +1025,21 @@ public class TransactionLegacy implements Closeable { } } + private static T parseNumber(String value, Class type) { + if (value == null) { + return null; + } + try { + if (type.equals(Long.class)) { + return type.cast(Long.parseLong(value)); + } else { + return type.cast(Integer.parseInt(value)); + } + } catch (NumberFormatException ignored) { + return null; + } + } + @SuppressWarnings({"rawtypes", "unchecked"}) public static void initDataSource(Properties dbProps) { try { @@ -1030,9 +1050,12 @@ public class TransactionLegacy implements Closeable { LOGGER.info("Is Data Base High Availiability enabled? Ans : " + s_dbHAEnabled); String loadBalanceStrategy = dbProps.getProperty("db.ha.loadBalanceStrategy"); // FIXME: If params are missing...default them???? - final int cloudMaxActive = Integer.parseInt(dbProps.getProperty("db.cloud.maxActive")); - final int cloudMaxIdle = Integer.parseInt(dbProps.getProperty("db.cloud.maxIdle")); - final long cloudMaxWait = Long.parseLong(dbProps.getProperty("db.cloud.maxWait")); + final Integer cloudMaxActive = parseNumber(dbProps.getProperty("db.cloud.maxActive"), Integer.class); + final Integer cloudMaxIdle = parseNumber(dbProps.getProperty("db.cloud.maxIdle"), Integer.class); + final Long cloudMaxWait = parseNumber(dbProps.getProperty("db.cloud.maxWait"), Long.class); + final Integer cloudMinIdleConnections = parseNumber(dbProps.getProperty("db.cloud.minIdleConnections"), Integer.class); + final Long cloudConnectionTimeout = parseNumber(dbProps.getProperty("db.cloud.connectionTimeout"), Long.class); + final Long cloudKeepAliveTimeout = parseNumber(dbProps.getProperty("db.cloud.keepAliveTime"), Long.class); final String cloudUsername = dbProps.getProperty("db.cloud.username"); final String cloudPassword 
= dbProps.getProperty("db.cloud.password"); final String cloudValidationQuery = dbProps.getProperty("db.cloud.validationQuery"); @@ -1071,14 +1094,19 @@ public class TransactionLegacy implements Closeable { DriverLoader.loadDriver(cloudUriAndDriver.second()); // Default Data Source for CloudStack - s_ds = createDataSource(cloudUriAndDriver.first(), cloudUsername, cloudPassword, cloudMaxActive, cloudMaxIdle, cloudMaxWait, - cloudTimeBtwEvictionRunsMillis, cloudMinEvcitableIdleTimeMillis, cloudTestWhileIdle, cloudTestOnBorrow, - cloudValidationQuery, isolationLevel); + s_ds = createDataSource(dbProps.getProperty("db.cloud.connectionPoolLib"), cloudUriAndDriver.first(), + cloudUsername, cloudPassword, cloudMaxActive, cloudMaxIdle, cloudMaxWait, + cloudTimeBtwEvictionRunsMillis, cloudMinEvcitableIdleTimeMillis, cloudTestWhileIdle, + cloudTestOnBorrow, cloudValidationQuery, cloudMinIdleConnections, cloudConnectionTimeout, + cloudKeepAliveTimeout, isolationLevel, "cloud"); // Configure the usage db - final int usageMaxActive = Integer.parseInt(dbProps.getProperty("db.usage.maxActive")); - final int usageMaxIdle = Integer.parseInt(dbProps.getProperty("db.usage.maxIdle")); - final long usageMaxWait = Long.parseLong(dbProps.getProperty("db.usage.maxWait")); + final Integer usageMaxActive = parseNumber(dbProps.getProperty("db.usage.maxActive"), Integer.class); + final Integer usageMaxIdle = parseNumber(dbProps.getProperty("db.usage.maxIdle"), Integer.class); + final Long usageMaxWait = parseNumber(dbProps.getProperty("db.usage.maxWait"), Long.class); + final Integer usageMinIdleConnections = parseNumber(dbProps.getProperty("db.usage.minIdleConnections"), Integer.class); + final Long usageConnectionTimeout = parseNumber(dbProps.getProperty("db.usage.connectionTimeout"), Long.class); + final Long usageKeepAliveTimeout = parseNumber(dbProps.getProperty("db.usage.keepAliveTime"), Long.class); final String usageUsername = dbProps.getProperty("db.usage.username"); final String 
usagePassword = dbProps.getProperty("db.usage.password"); @@ -1087,15 +1115,19 @@ public class TransactionLegacy implements Closeable { DriverLoader.loadDriver(usageUriAndDriver.second()); // Data Source for usage server - s_usageDS = createDataSource(usageUriAndDriver.first(), usageUsername, usagePassword, - usageMaxActive, usageMaxIdle, usageMaxWait, null, null, null, null, - null, isolationLevel); + s_usageDS = createDataSource(dbProps.getProperty("db.usage.connectionPoolLib"), usageUriAndDriver.first(), + usageUsername, usagePassword, usageMaxActive, usageMaxIdle, usageMaxWait, null, + null, null, null, null, + usageMinIdleConnections, usageConnectionTimeout, usageKeepAliveTimeout, isolationLevel, "usage"); try { // Configure the simulator db - final int simulatorMaxActive = Integer.parseInt(dbProps.getProperty("db.simulator.maxActive")); - final int simulatorMaxIdle = Integer.parseInt(dbProps.getProperty("db.simulator.maxIdle")); - final long simulatorMaxWait = Long.parseLong(dbProps.getProperty("db.simulator.maxWait")); + final Integer simulatorMaxActive = parseNumber(dbProps.getProperty("db.simulator.maxActive"), Integer.class); + final Integer simulatorMaxIdle = parseNumber(dbProps.getProperty("db.simulator.maxIdle"), Integer.class); + final Long simulatorMaxWait = parseNumber(dbProps.getProperty("db.simulator.maxWait"), Long.class); + final Integer simulatorMinIdleConnections = parseNumber(dbProps.getProperty("db.simulator.minIdleConnections"), Integer.class); + final Long simulatorConnectionTimeout = parseNumber(dbProps.getProperty("db.simulator.connectionTimeout"), Long.class); + final Long simulatorKeepAliveTimeout = parseNumber(dbProps.getProperty("db.simulator.keepAliveTime"), Long.class); final String simulatorUsername = dbProps.getProperty("db.simulator.username"); final String simulatorPassword = dbProps.getProperty("db.simulator.password"); @@ -1122,15 +1154,18 @@ public class TransactionLegacy implements Closeable { 
DriverLoader.loadDriver(simulatorDriver); - s_simulatorDS = createDataSource(simulatorConnectionUri, simulatorUsername, simulatorPassword, - simulatorMaxActive, simulatorMaxIdle, simulatorMaxWait, null, null, null, null, cloudValidationQuery, isolationLevel); + s_simulatorDS = createDataSource(dbProps.getProperty("db.simulator.connectionPoolLib"), + simulatorConnectionUri, simulatorUsername, simulatorPassword, simulatorMaxActive, + simulatorMaxIdle, simulatorMaxWait, null, null, null, null, + cloudValidationQuery, simulatorMinIdleConnections, simulatorConnectionTimeout, + simulatorKeepAliveTimeout, isolationLevel, "simulator"); } catch (Exception e) { LOGGER.debug("Simulator DB properties are not available. Not initializing simulator DS"); } } catch (final Exception e) { - s_ds = getDefaultDataSource("cloud"); - s_usageDS = getDefaultDataSource("cloud_usage"); - s_simulatorDS = getDefaultDataSource("cloud_simulator"); + s_ds = getDefaultDataSource(dbProps.getProperty("db.cloud.connectionPoolLib"), "cloud"); + s_usageDS = getDefaultDataSource(dbProps.getProperty("db.usage.connectionPoolLib"), "cloud_usage"); + s_simulatorDS = getDefaultDataSource(dbProps.getProperty("db.simulator.connectionPoolLib"), "simulator"); LOGGER.warn( "Unable to load db configuration, using defaults with 5 connections. Falling back on assumed datasource on localhost:3306 using username:password=cloud:cloud. 
Please check your configuration", e); @@ -1222,11 +1257,71 @@ public class TransactionLegacy implements Closeable { /** * Creates a data source */ - private static DataSource createDataSource(String uri, String username, String password, + private static DataSource createDataSource(String connectionPoolLib, String uri, String username, String password, + Integer maxActive, Integer maxIdle, Long maxWait, Long timeBtwnEvictionRuns, Long minEvictableIdleTime, + Boolean testWhileIdle, Boolean testOnBorrow, String validationQuery, Integer minIdleConnections, + Long connectionTimeout, Long keepAliveTime, Integer isolationLevel, String dsName) { + LOGGER.debug("Creating datasource for database: {} with connection pool lib: {}", dsName, + connectionPoolLib); + if (CONNECTION_POOL_LIB_DBCP.equals(connectionPoolLib)) { + return createDbcpDataSource(uri, username, password, maxActive, maxIdle, maxWait, timeBtwnEvictionRuns, + minEvictableIdleTime, testWhileIdle, testOnBorrow, validationQuery, isolationLevel); + } + return createHikaricpDataSource(uri, username, password, maxActive, maxIdle, maxWait, minIdleConnections, + connectionTimeout, keepAliveTime, isolationLevel, dsName); + } + + private static DataSource createHikaricpDataSource(String uri, String username, String password, Integer maxActive, Integer maxIdle, Long maxWait, - Long timeBtwnEvictionRuns, Long minEvictableIdleTime, - Boolean testWhileIdle, Boolean testOnBorrow, - String validationQuery, Integer isolationLevel) { + Integer minIdleConnections, Long connectionTimeout, Long keepAliveTime, + Integer isolationLevel, String dsName) { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl(uri); + config.setUsername(username); + config.setPassword(password); + + config.setPoolName(dsName); + + // Connection pool properties + config.setMaximumPoolSize(ObjectUtils.defaultIfNull(maxActive, 250)); + config.setIdleTimeout(ObjectUtils.defaultIfNull(maxIdle, 30) * 1000); + 
config.setMaxLifetime(ObjectUtils.defaultIfNull(maxWait, 600000L)); + config.setMinimumIdle(ObjectUtils.defaultIfNull(minIdleConnections, 5)); + config.setConnectionTimeout(ObjectUtils.defaultIfNull(connectionTimeout, 30000L)); + config.setKeepaliveTime(ObjectUtils.defaultIfNull(keepAliveTime, 600000L)); + + String isolationLevelString = "TRANSACTION_READ_COMMITTED"; + if (isolationLevel == Connection.TRANSACTION_SERIALIZABLE) { + isolationLevelString = "TRANSACTION_SERIALIZABLE"; + } else if (isolationLevel == Connection.TRANSACTION_READ_UNCOMMITTED) { + isolationLevelString = "TRANSACTION_READ_UNCOMMITTED"; + } else if (isolationLevel == Connection.TRANSACTION_REPEATABLE_READ) { + isolationLevelString = "TRANSACTION_REPEATABLE_READ"; + } + config.setTransactionIsolation(isolationLevelString); + + // Standard datasource config for MySQL + config.addDataSourceProperty("cachePrepStmts", "true"); + config.addDataSourceProperty("prepStmtCacheSize", "250"); + config.addDataSourceProperty("prepStmtCacheSqlLimit", "2048"); + // Additional config for MySQL + config.addDataSourceProperty("useServerPrepStmts", "true"); + config.addDataSourceProperty("useLocalSessionState", "true"); + config.addDataSourceProperty("rewriteBatchedStatements", "true"); + config.addDataSourceProperty("cacheResultSetMetadata", "true"); + config.addDataSourceProperty("cacheServerConfiguration", "true"); + config.addDataSourceProperty("elideSetAutoCommits", "true"); + config.addDataSourceProperty("maintainTimeStats", "false"); + + HikariDataSource dataSource = new HikariDataSource(config); + return dataSource; + } + + private static DataSource createDbcpDataSource(String uri, String username, String password, + Integer maxActive, Integer maxIdle, Long maxWait, + Long timeBtwnEvictionRuns, Long minEvictableIdleTime, + Boolean testWhileIdle, Boolean testOnBorrow, + String validationQuery, Integer isolationLevel) { ConnectionFactory connectionFactory = new DriverManagerConnectionFactory(uri, username, 
password); PoolableConnectionFactory poolableConnectionFactory = new PoolableConnectionFactory(connectionFactory, null); GenericObjectPoolConfig config = createPoolConfig(maxActive, maxIdle, maxWait, timeBtwnEvictionRuns, minEvictableIdleTime, testWhileIdle, testOnBorrow); @@ -1267,6 +1362,44 @@ public class TransactionLegacy implements Closeable { return config; } + private static DataSource getDefaultDataSource(final String connectionPoolLib, final String database) { + LOGGER.debug("Creating default datasource for database: {} with connection pool lib: {}", + database, connectionPoolLib); + if (CONNECTION_POOL_LIB_DBCP.equalsIgnoreCase(connectionPoolLib)) { + return getDefaultDbcpDataSource(database); + } + return getDefaultHikaricpDataSource(database); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private static DataSource getDefaultHikaricpDataSource(final String database) { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:mysql://localhost:3306/" + database + "?" + CONNECTION_PARAMS); + config.setUsername("cloud"); + config.setPassword("cloud"); + config.setPoolName(database); + config.setDriverClassName("com.mysql.cj.jdbc.Driver"); + config.setMaximumPoolSize(250); + config.setConnectionTimeout(1000); + config.setIdleTimeout(1000); + config.setKeepaliveTime(1000); + config.setMaxLifetime(1000); + config.setTransactionIsolation("TRANSACTION_READ_COMMITTED"); + config.setInitializationFailTimeout(-1L); + config.addDataSourceProperty("cachePrepStmts", "true"); + config.addDataSourceProperty("prepStmtCacheSize", "250"); + config.addDataSourceProperty("prepStmtCacheSqlLimit", "2048"); + return new HikariDataSource(config); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private static DataSource getDefaultDbcpDataSource(final String database) { + final ConnectionFactory connectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://localhost:3306/" + database + "?" 
+ CONNECTION_PARAMS, "cloud", "cloud"); + final PoolableConnectionFactory poolableConnectionFactory = new PoolableConnectionFactory(connectionFactory, null); + final GenericObjectPool connectionPool = new GenericObjectPool(poolableConnectionFactory); + return new PoolingDataSource(connectionPool); + } + private static String getDBHAParams(String dbName, Properties dbProps) { StringBuilder sb = new StringBuilder(); sb.append("failOverReadOnly=" + dbProps.getProperty("db." + dbName + ".failOverReadOnly")); @@ -1278,14 +1411,6 @@ public class TransactionLegacy implements Closeable { return sb.toString(); } - @SuppressWarnings({"unchecked", "rawtypes"}) - private static DataSource getDefaultDataSource(final String database) { - final ConnectionFactory connectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://localhost:3306/" + database + "?" + CONNECTION_PARAMS, "cloud", "cloud"); - final PoolableConnectionFactory poolableConnectionFactory = new PoolableConnectionFactory(connectionFactory, null); - final GenericObjectPool connectionPool = new GenericObjectPool(poolableConnectionFactory); - return new PoolingDataSource(connectionPool); - } - /** * Used for unit testing primarily * diff --git a/framework/db/src/main/resources/META-INF/cloudstack/system/spring-framework-db-system-context.xml b/framework/db/src/main/resources/META-INF/cloudstack/system/spring-framework-db-system-context.xml index f532a5cdc6c..84f81f3b191 100644 --- a/framework/db/src/main/resources/META-INF/cloudstack/system/spring-framework-db-system-context.xml +++ b/framework/db/src/main/resources/META-INF/cloudstack/system/spring-framework-db-system-context.xml @@ -28,5 +28,5 @@ > - + diff --git a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java index 4a3eaf9e68c..7a14f385fa1 100644 --- a/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java +++ 
b/framework/events/src/main/java/org/apache/cloudstack/framework/events/Event.java @@ -20,22 +20,49 @@ package org.apache.cloudstack.framework.events; import com.google.gson.Gson; +import com.google.gson.annotations.Expose; public class Event { + @Expose(serialize = false, deserialize = false) + Long eventId; + @Expose(serialize = false, deserialize = false) + String eventUuid; String eventCategory; String eventType; String eventSource; String resourceType; String resourceUUID; String description; + @Expose(serialize = false, deserialize = false) + Long resourceAccountId; + @Expose(serialize = false, deserialize = false) + String resourceAccountUuid; + @Expose(serialize = false, deserialize = false) + Long resourceDomainId; public Event(String eventSource, String eventCategory, String eventType, String resourceType, String resourceUUID) { - this.eventCategory = eventCategory; - this.eventType = eventType; - this.eventSource = eventSource; - this.resourceType = resourceType; - this.resourceUUID = resourceUUID; + setEventCategory(eventCategory); + setEventType(eventType); + setEventSource(eventSource); + setResourceType(resourceType); + setResourceUUID(resourceUUID); + } + + public Long getEventId() { + return eventId; + } + + public void setEventId(Long eventId) { + this.eventId = eventId; + } + + public String getEventUuid() { + return eventUuid; + } + + public void setEventUuid(String eventUuid) { + this.eventUuid = eventUuid; } public String getEventCategory() { @@ -68,7 +95,7 @@ public class Event { public void setDescription(Object message) { Gson gson = new Gson(); - this.description = gson.toJson(message).toString(); + this.description = gson.toJson(message); } public void setDescription(String description) { @@ -90,4 +117,28 @@ public class Event { public String getResourceUUID() { return resourceUUID; } + + public Long getResourceAccountId() { + return resourceAccountId; + } + + public void setResourceAccountId(Long resourceAccountId) { + 
this.resourceAccountId = resourceAccountId; + } + + public String getResourceAccountUuid() { + return resourceAccountUuid; + } + + public void setResourceAccountUuid(String resourceAccountUuid) { + this.resourceAccountUuid = resourceAccountUuid; + } + + public Long getResourceDomainId() { + return resourceDomainId; + } + + public void setResourceDomainId(Long resourceDomainId) { + this.resourceDomainId = resourceDomainId; + } } diff --git a/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventBus.java b/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventBus.java index 8c8c08fcfdf..6cd6256ce81 100644 --- a/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventBus.java +++ b/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventBus.java @@ -27,6 +27,8 @@ import java.util.UUID; */ public interface EventBus { + String getName(); + /** * publish an event on to the event bus * diff --git a/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventDistributor.java b/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventDistributor.java new file mode 100644 index 00000000000..01185359d6f --- /dev/null +++ b/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventDistributor.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.cloudstack.framework.events; + +import java.util.Map; + +import com.cloud.utils.component.Manager; + +public interface EventDistributor extends Manager { + /** + * Publish an event on to all the available event buses + * + * @param event event that needs to be published on the event bus + * @return Map of bus names and EventBusException for buses that failed with + * exception + */ + Map publish(Event event); +} diff --git a/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventDistributorImpl.java b/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventDistributorImpl.java new file mode 100644 index 00000000000..a67ff5cc926 --- /dev/null +++ b/framework/events/src/main/java/org/apache/cloudstack/framework/events/EventDistributorImpl.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.cloudstack.framework.events; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.annotation.PostConstruct; + +import org.apache.commons.lang3.StringUtils; + +import com.cloud.utils.component.ManagerBase; + +public class EventDistributorImpl extends ManagerBase implements EventDistributor { + + List eventBuses; + + public void setEventBuses(List eventBuses) { + this.eventBuses = eventBuses; + } + + @PostConstruct + public void init() { + logger.trace("Found {} event buses : {}", () -> eventBuses.size(), + () -> StringUtils.join(eventBuses.stream().map(x->x.getClass().getName()).toArray())); + } + + @Override + public Map publish(Event event) { + Map exceptions = new HashMap<>(); + if (event == null) { + return exceptions; + } + logger.trace("Publishing event [category: {}, type: {}]: {} to {} event buses", + event.getEventCategory(), event.getEventType(), + event.getDescription(), eventBuses.size()); + for (EventBus bus : eventBuses) { + try { + bus.publish(event); + } catch (EventBusException e) { + logger.warn("Failed to publish event [category: {}, type: {}] on bus {}", + event.getEventCategory(), event.getEventType(), bus.getName()); + logger.trace(event.getDescription()); + exceptions.put(bus.getName(), e); + } + } + return exceptions; + } + +} diff --git a/framework/events/src/test/java/org/apache/cloudstack/framework/events/EventDistributorImplTest.java b/framework/events/src/test/java/org/apache/cloudstack/framework/events/EventDistributorImplTest.java new file mode 100644 index 00000000000..8a8dd91b9d8 --- /dev/null +++ b/framework/events/src/test/java/org/apache/cloudstack/framework/events/EventDistributorImplTest.java @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.framework.events; + +import java.util.List; +import java.util.Map; + +import org.apache.commons.collections.MapUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class EventDistributorImplTest { + + @InjectMocks + EventDistributorImpl eventDistributor = new EventDistributorImpl(); + + @Test + public void testSetEventBuses() { + Assert.assertNull(ReflectionTestUtils.getField(eventDistributor, "eventBuses")); + EventBus eventBus = Mockito.mock(EventBus.class); + EventBus eventBus1 = Mockito.mock(EventBus.class); + eventDistributor.setEventBuses(List.of(eventBus, eventBus1)); + Assert.assertNotNull(ReflectionTestUtils.getField(eventDistributor, "eventBuses")); + } + + @Test + public void testPublishNullEvent() { + Map exceptionMap = eventDistributor.publish(null); + Assert.assertTrue(MapUtils.isEmpty(exceptionMap)); + } + + @Test + public void testPublishOneReturnsException() throws EventBusException { + String busName = "Test"; + EventBus eventBus = Mockito.mock(EventBus.class); + 
Mockito.doReturn(busName).when(eventBus).getName(); + Mockito.doThrow(EventBusException.class).when(eventBus).publish(Mockito.any(Event.class)); + EventBus eventBus1 = Mockito.mock(EventBus.class); + Mockito.doNothing().when(eventBus1).publish(Mockito.any(Event.class)); + eventDistributor.eventBuses = List.of(eventBus, eventBus1); + Map exceptionMap = eventDistributor.publish(Mockito.mock(Event.class)); + Assert.assertTrue(MapUtils.isNotEmpty(exceptionMap)); + Assert.assertEquals(1, exceptionMap.size()); + Assert.assertTrue(exceptionMap.containsKey(busName)); + } +} diff --git a/framework/ipc/src/main/resources/META-INF/cloudstack/core/spring-framework-ipc-core-context.xml b/framework/ipc/src/main/resources/META-INF/cloudstack/core/spring-framework-ipc-core-context.xml index 926a84a33ad..9c9df4f579f 100644 --- a/framework/ipc/src/main/resources/META-INF/cloudstack/core/spring-framework-ipc-core-context.xml +++ b/framework/ipc/src/main/resources/META-INF/cloudstack/core/spring-framework-ipc-core-context.xml @@ -25,7 +25,7 @@ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd" - > + > @@ -47,5 +47,5 @@ - + diff --git a/framework/ipc/src/test/resources/SampleManagementServerAppContext.xml b/framework/ipc/src/test/resources/SampleManagementServerAppContext.xml index fb21f2c2f50..f6afd1620f8 100644 --- a/framework/ipc/src/test/resources/SampleManagementServerAppContext.xml +++ b/framework/ipc/src/test/resources/SampleManagementServerAppContext.xml @@ -18,13 +18,13 @@ under the License. 
--> @@ -51,12 +51,12 @@ - + - + diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java index 89601e6b5d2..b3bfda0334c 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java @@ -39,4 +39,5 @@ public interface VmWorkJobDao extends GenericDao { void expungeCompletedWorkJobs(Date cutDate); void expungeLeftoverWorkJobs(long msid); + int expungeByVmList(List vmIds, Long batchSize); } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java index e66221cc8fe..3b167498a37 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java @@ -24,10 +24,10 @@ import java.util.List; import javax.annotation.PostConstruct; import javax.inject.Inject; - import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO.Step; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.DateUtil; import com.cloud.utils.db.Filter; @@ -212,4 +212,16 @@ public class VmWorkJobDaoImpl extends GenericDaoBase implemen } }); } + + @Override + public int expungeByVmList(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getVmInstanceId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git 
a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java new file mode 100644 index 00000000000..3e2bc15b1e0 --- /dev/null +++ b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.framework.jobs.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class VmWorkJobDaoImplTest { + + @Spy + VmWorkJobDaoImpl vmWorkJobDaoImpl; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, vmWorkJobDaoImpl.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, vmWorkJobDaoImpl.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(vmWorkJobDaoImpl).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); + final VmWorkJobVO mockedVO = Mockito.mock(VmWorkJobVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), vmWorkJobDaoImpl.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(vmWorkJobDaoImpl, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/framework/jobs/src/test/resources/AsyncJobManagerTestContext.xml b/framework/jobs/src/test/resources/AsyncJobManagerTestContext.xml index 4e5d7675877..e838c8da34d 100644 --- a/framework/jobs/src/test/resources/AsyncJobManagerTestContext.xml +++ b/framework/jobs/src/test/resources/AsyncJobManagerTestContext.xml @@ -22,7 +22,7 @@ xmlns:tx="http://www.springframework.org/schema/tx" xmlns:aop="http://www.springframework.org/schema/aop" xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd - http://www.springframework.org/schema/tx + http://www.springframework.org/schema/tx http://www.springframework.org/schema/tx/spring-tx.xsd http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd @@ -34,5 +34,5 @@ class="org.apache.cloudstack.framework.jobs.AsyncJobManagerTestConfiguration" /> - + diff --git a/framework/jobs/src/test/resources/commonContext.xml b/framework/jobs/src/test/resources/commonContext.xml index 80d57f1d5f8..b19bd755b48 100644 --- a/framework/jobs/src/test/resources/commonContext.xml +++ b/framework/jobs/src/test/resources/commonContext.xml @@ -1,19 +1,19 @@ - > periods = accountQuotaUsages.stream() + .map(quotaUsageVO -> new Pair<>(quotaUsageVO.getStartDate(), quotaUsageVO.getEndDate())) + 
.collect(Collectors.toCollection(LinkedHashSet::new)); + + logger.info(String.format("Processing quota balance for account[{}] between [{}] and [{}].", accountToString, startDate, lastQuotaUsageEndDate)); - BigDecimal aggregatedUsage = BigDecimal.ZERO; long accountId = accountVo.getAccountId(); long domainId = accountVo.getDomainId(); + BigDecimal accountBalance = retrieveBalanceForUsageCalculation(accountId, domainId, startDate, accountToString); - aggregatedUsage = getUsageValueAccordingToLastQuotaUsageEntryAndLastQuotaBalance(accountId, domainId, startDate, endDate, aggregatedUsage, accountToString); + for (Pair period : periods) { + startDate = period.first(); + endDate = period.second(); - for (QuotaUsageVO quotaUsage : accountQuotaUsages) { - Date quotaUsageStartDate = quotaUsage.getStartDate(); - Date quotaUsageEndDate = quotaUsage.getEndDate(); - BigDecimal quotaUsed = quotaUsage.getQuotaUsed(); - - if (quotaUsed.equals(BigDecimal.ZERO)) { - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, quotaUsageStartDate, quotaUsageEndDate, accountToString)); - continue; - } - - if (startDate.compareTo(quotaUsageStartDate) == 0) { - aggregatedUsage = aggregatedUsage.subtract(quotaUsed); - continue; - } - - _quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, aggregatedUsage, endDate)); - - aggregatedUsage = BigDecimal.ZERO; - startDate = quotaUsageStartDate; - endDate = quotaUsageEndDate; - - QuotaBalanceVO lastRealBalanceEntry = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, endDate); - Date lastBalanceDate = new Date(0); - - if (lastRealBalanceEntry != null) { - lastBalanceDate = lastRealBalanceEntry.getUpdatedOn(); - aggregatedUsage = aggregatedUsage.add(lastRealBalanceEntry.getCreditBalance()); - } - - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, lastBalanceDate, endDate, accountToString)); - aggregatedUsage = aggregatedUsage.subtract(quotaUsed); + 
accountBalance = calculateBalanceConsideringCreditsAddedAndQuotaUsed(accountBalance, accountQuotaUsages, accountId, domainId, startDate, endDate, accountToString); + _quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, accountBalance, endDate)); } - - _quotaBalanceDao.saveQuotaBalance(new QuotaBalanceVO(accountId, domainId, aggregatedUsage, endDate)); - saveQuotaAccount(accountId, aggregatedUsage, endDate); + saveQuotaAccount(accountId, accountBalance, endDate); } - protected BigDecimal getUsageValueAccordingToLastQuotaUsageEntryAndLastQuotaBalance(long accountId, long domainId, Date startDate, Date endDate, BigDecimal aggregatedUsage, - String accountToString) { + /** + * Calculates the balance for the given account considering the specified period. The balance is calculated as follows: + *
    + *
  1. The credits added in this period are added to the balance.
  2. + *
  3. All quota consumed in this period are subtracted from the account balance.
  4. + *
+ */ + protected BigDecimal calculateBalanceConsideringCreditsAddedAndQuotaUsed(BigDecimal accountBalance, List accountQuotaUsages, long accountId, long domainId, + Date startDate, Date endDate, String accountToString) { + accountBalance = accountBalance.add(aggregateCreditBetweenDates(accountId, domainId, startDate, endDate, accountToString)); + + for (QuotaUsageVO quotaUsageVO : accountQuotaUsages) { + if (DateUtils.isSameInstant(quotaUsageVO.getStartDate(), startDate)) { + accountBalance = accountBalance.subtract(quotaUsageVO.getQuotaUsed()); + } + } + return accountBalance; + } + + /** + * Retrieves the initial balance prior to the period of the quota processing. + *
    + *
  • + * If this is the first time the account is processed, the credits prior to the quota processing are added, and the first balance is persisted in the DB. + *
  • + *
  • + * Otherwise, the last real balance of the account is retrieved. + *
  • + *
+ */ + protected BigDecimal retrieveBalanceForUsageCalculation(long accountId, long domainId, Date startDate, String accountToString) { + BigDecimal accountBalance = BigDecimal.ZERO; QuotaUsageVO lastQuotaUsage = _quotaUsageDao.findLastQuotaUsageEntry(accountId, domainId, startDate); if (lastQuotaUsage == null) { - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, new Date(0), startDate, accountToString)); - QuotaBalanceVO firstBalance = new QuotaBalanceVO(accountId, domainId, aggregatedUsage, startDate); + accountBalance = accountBalance.add(aggregateCreditBetweenDates(accountId, domainId, new Date(0), startDate, accountToString)); + QuotaBalanceVO firstBalance = new QuotaBalanceVO(accountId, domainId, accountBalance, startDate); logger.debug(String.format("Persisting the first quota balance [%s] for account [%s].", firstBalance, accountToString)); _quotaBalanceDao.saveQuotaBalance(firstBalance); } else { - QuotaBalanceVO lastRealBalance = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, endDate); + QuotaBalanceVO lastRealBalance = _quotaBalanceDao.findLastBalanceEntry(accountId, domainId, startDate); - if (lastRealBalance != null) { - aggregatedUsage = aggregatedUsage.add(lastRealBalance.getCreditBalance()); - aggregatedUsage = aggregatedUsage.add(aggregateCreditBetweenDates(accountId, domainId, lastRealBalance.getUpdatedOn(), endDate, accountToString)); + if (lastRealBalance == null) { + logger.warn("Account [{}] has quota usage entries, however it does not have a quota balance.", accountToString); } else { - logger.warn(String.format("Account [%s] has quota usage entries, however it does not have a quota balance.", accountToString)); + accountBalance = accountBalance.add(lastRealBalance.getCreditBalance()); } } - return aggregatedUsage; + return accountBalance; } protected void saveQuotaAccount(long accountId, BigDecimal aggregatedUsage, Date endDate) { @@ -367,9 +373,22 @@ public class QuotaManagerImpl extends 
ManagerBase implements QuotaManager { PresetVariables presetVariables = getPresetVariables(hasAnyQuotaTariffWithActivationRule, usageRecord); BigDecimal aggregatedQuotaTariffsValue = BigDecimal.ZERO; + quotaTariffs.sort(Comparator.comparing(QuotaTariffVO::getPosition)); + + List lastTariffs = new ArrayList<>(); + + for (QuotaTariffVO quotaTariff : quotaTariffs) { if (isQuotaTariffInPeriodToBeApplied(usageRecord, quotaTariff, accountToString)) { - aggregatedQuotaTariffsValue = aggregatedQuotaTariffsValue.add(getQuotaTariffValueToBeApplied(quotaTariff, jsInterpreter, presetVariables)); + + BigDecimal tariffValue = getQuotaTariffValueToBeApplied(quotaTariff, jsInterpreter, presetVariables, lastTariffs); + + aggregatedQuotaTariffsValue = aggregatedQuotaTariffsValue.add(tariffValue); + + Tariff tariffPresetVariable = new Tariff(); + tariffPresetVariable.setId(quotaTariff.getUuid()); + tariffPresetVariable.setValue(tariffValue); + lastTariffs.add(tariffPresetVariable); } } @@ -397,7 +416,7 @@ public class QuotaManagerImpl extends ManagerBase implements QuotaManager { *
  • If the activation rule results in something else, returns {@link BigDecimal#ZERO}.
  • * */ - protected BigDecimal getQuotaTariffValueToBeApplied(QuotaTariffVO quotaTariff, JsInterpreter jsInterpreter, PresetVariables presetVariables) { + protected BigDecimal getQuotaTariffValueToBeApplied(QuotaTariffVO quotaTariff, JsInterpreter jsInterpreter, PresetVariables presetVariables, List lastAppliedTariffsList) { String activationRule = quotaTariff.getActivationRule(); BigDecimal quotaTariffValue = quotaTariff.getCurrencyValue(); String quotaTariffToString = quotaTariff.toString(usageAggregationTimeZone); @@ -409,6 +428,7 @@ public class QuotaManagerImpl extends ManagerBase implements QuotaManager { } injectPresetVariablesIntoJsInterpreter(jsInterpreter, presetVariables); + jsInterpreter.injectVariable("lastTariffs", lastAppliedTariffsList.toString()); String scriptResult = jsInterpreter.executeScript(activationRule).toString(); diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Account.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Account.java index c0b1f762f70..37c90ab0bcd 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Account.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Account.java @@ -17,7 +17,9 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; -public class Account extends GenericPresetVariable{ +public class Account extends GenericPresetVariable { + @PresetVariableDefinition(description = "Role of the account. 
This field will not exist if the account is a project.") + private Role role; public Role getRole() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/BackupOffering.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/BackupOffering.java index 457e71a141f..d8457d294ec 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/BackupOffering.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/BackupOffering.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; public class BackupOffering extends GenericPresetVariable { + @PresetVariableDefinition(description = "External ID of the backup offering that generated the backup.") private String externalId; public String getExternalId() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputeOffering.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputeOffering.java index b42c32a584e..1d294276d47 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputeOffering.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputeOffering.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; public class ComputeOffering extends GenericPresetVariable { + @PresetVariableDefinition(description = "A boolean informing if the compute offering is customized or not.") private boolean customized; public boolean isCustomized() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputingResources.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputingResources.java index 
d4f335b081c..9c86d2d6e0c 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputingResources.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/ComputingResources.java @@ -21,8 +21,13 @@ import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; public class ComputingResources { + @PresetVariableDefinition(description = "Current VM's memory (in MiB).") private Integer memory; + + @PresetVariableDefinition(description = "Current VM's vCPUs.") private Integer cpuNumber; + + @PresetVariableDefinition(description = "Current VM's CPU speed (in MHz).") private Integer cpuSpeed; public Integer getMemory() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Domain.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Domain.java index 01b702feb1a..6d83da4cd8f 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Domain.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Domain.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; public class Domain extends GenericPresetVariable { + @PresetVariableDefinition(description = "Path of the domain owner of the resource.") private String path; public String getPath() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/GenericPresetVariable.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/GenericPresetVariable.java index b081e57611f..f59f23abdc1 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/GenericPresetVariable.java +++ 
b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/GenericPresetVariable.java @@ -23,8 +23,12 @@ import java.util.Set; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class GenericPresetVariable { + @PresetVariableDefinition(description = "ID of the resource.") private String id; + + @PresetVariableDefinition(description = "Name of the resource.") private String name; + protected transient Set fieldNamesToIncludeInToString = new HashSet<>(); public String getId() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Host.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Host.java index fef3e4376dc..4a0fd2f5a07 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Host.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Host.java @@ -20,8 +20,10 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; import java.util.List; public class Host extends GenericPresetVariable { + @PresetVariableDefinition(description = "List of tags of the host where the VM is running (i.e.: [\"a\", \"b\"]).") private List tags; + @PresetVariableDefinition(description = "Whether the tag is a rule interpreted in JavaScript.") private Boolean isTagARule; public List getTags() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableDefinition.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableDefinition.java new file mode 100644 index 00000000000..0e10a8af9d1 --- /dev/null +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableDefinition.java @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one 
+// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.quota.activationrule.presetvariables; + +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.FIELD; +import static java.lang.annotation.RetentionPolicy.RUNTIME; + +/** + * Describes the preset variable and indicates to which Quota usage types it is loaded. + */ +@Target(FIELD) +@Retention(RUNTIME) +public @interface PresetVariableDefinition { + /** + * An array indicating for which Quota usage types the preset variable is loaded. + * @return an array with the usage types for which the preset variable is loaded. + */ + int[] supportedTypes() default 0; + + /** + * A {@link String} describing the preset variable. + * @return the description of the preset variable. 
+ */ + String description() default ""; +} diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariables.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariables.java index 2fb6e1ac131..b27bf589c16 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariables.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariables.java @@ -19,11 +19,22 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; public class PresetVariables { + @PresetVariableDefinition(description = "Account owner of the resource.") private Account account; + + @PresetVariableDefinition(description = "Domain owner of the resource.") private Domain domain; + + @PresetVariableDefinition(description = "Project owner of the resource. This field will not exist if the resource belongs to an account.") private GenericPresetVariable project; + + @PresetVariableDefinition(description = "Type of the record used. 
Examples for this are: VirtualMachine, DomainRouter, SourceNat, KVM.") private String resourceType; + + @PresetVariableDefinition(description = "Data related to the resource being processed.") private Value value; + + @PresetVariableDefinition(description = "Zone where the resource is.") private GenericPresetVariable zone; public Account getAccount() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Role.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Role.java index fc4716fc309..3f953b3a4ff 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Role.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Role.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.quota.activationrule.presetvariables; import org.apache.cloudstack.acl.RoleType; public class Role extends GenericPresetVariable { + @PresetVariableDefinition(description = "Role type of the resource's owner.") private RoleType type; public RoleType getType() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Storage.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Storage.java index 6be1dfb025a..9b6cfb31092 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Storage.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Storage.java @@ -22,9 +22,13 @@ import java.util.List; import com.cloud.storage.ScopeType; public class Storage extends GenericPresetVariable { + @PresetVariableDefinition(description = "List of string representing the tags of the storage where the volume is (i.e.: [\"a\", \"b\"]).") private List tags; + @PresetVariableDefinition(description = "Whether the tag is a rule interpreted in JavaScript. 
Applicable only for primary storages.") private Boolean isTagARule; + + @PresetVariableDefinition(description = "Scope of the storage where the volume is. Values can be: ZONE, CLUSTER or HOST. Applicable only for primary storages.") private ScopeType scope; public List getTags() { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Tariff.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Tariff.java new file mode 100644 index 00000000000..3703820a1a4 --- /dev/null +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Tariff.java @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.quota.activationrule.presetvariables; + +import java.math.BigDecimal; + +public class Tariff extends GenericPresetVariable { + private BigDecimal value; + + public BigDecimal getValue() { + return value; + } + + public void setValue(BigDecimal value) { + this.value = value; + fieldNamesToIncludeInToString.add("value"); + } +} diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Value.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Value.java index a1dc7b3c1bb..d87146d8798 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Value.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/Value.java @@ -23,25 +23,75 @@ import java.util.Map; import com.cloud.storage.Snapshot; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.vm.snapshot.VMSnapshot; +import org.apache.cloudstack.quota.constant.QuotaTypes; public class Value extends GenericPresetVariable { + + @PresetVariableDefinition(description = "ID of the resource.", supportedTypes = {QuotaTypes.ALLOCATED_VM, QuotaTypes.RUNNING_VM, QuotaTypes.VOLUME, QuotaTypes.TEMPLATE, + QuotaTypes.ISO, QuotaTypes.SNAPSHOT, QuotaTypes.NETWORK_OFFERING, QuotaTypes.VM_SNAPSHOT}) + private String id; + + @PresetVariableDefinition(description = "Name of the resource.", supportedTypes = {QuotaTypes.ALLOCATED_VM, QuotaTypes.RUNNING_VM, QuotaTypes.VOLUME, QuotaTypes.TEMPLATE, + QuotaTypes.ISO, QuotaTypes.SNAPSHOT, QuotaTypes.NETWORK_OFFERING, QuotaTypes.VM_SNAPSHOT}) + private String name; + + @PresetVariableDefinition(description = "Host where the VM is running.", supportedTypes = {QuotaTypes.RUNNING_VM}) private Host host; + + @PresetVariableDefinition(description = "OS of the VM/template.", supportedTypes = {QuotaTypes.RUNNING_VM, QuotaTypes.ALLOCATED_VM, QuotaTypes.TEMPLATE, QuotaTypes.ISO}) 
private String osName; + + @PresetVariableDefinition(description = "A list of resources of the account between the start and end date of the usage record being calculated " + + "(i.e.: [{zoneId: ..., domainId:...}]).") private List accountResources; + + @PresetVariableDefinition(supportedTypes = {QuotaTypes.ALLOCATED_VM, QuotaTypes.RUNNING_VM, QuotaTypes.VOLUME, QuotaTypes.TEMPLATE, QuotaTypes.ISO, QuotaTypes.SNAPSHOT, + QuotaTypes.VM_SNAPSHOT}, description = "List of tags of the resource in the format key:value (i.e.: {\"a\":\"b\", \"c\":\"d\"}).") private Map tags; + + @PresetVariableDefinition(description = "Tag of the network offering.", supportedTypes = {QuotaTypes.NETWORK_OFFERING}) private String tag; + + @PresetVariableDefinition(description = "Size of the resource (in MiB).", supportedTypes = {QuotaTypes.TEMPLATE, QuotaTypes.ISO, QuotaTypes.VOLUME, QuotaTypes.SNAPSHOT, + QuotaTypes.BACKUP}) private Long size; + + @PresetVariableDefinition(description = "Virtual size of the backup.", supportedTypes = {QuotaTypes.BACKUP}) private Long virtualSize; + + @PresetVariableDefinition(description = "Provisioning type of the resource. Values can be: thin, sparse or fat.", supportedTypes = {QuotaTypes.VOLUME}) private ProvisioningType provisioningType; + + @PresetVariableDefinition(description = "Type of the snapshot. Values can be: MANUAL, RECURRING, HOURLY, DAILY, WEEKLY and MONTHLY.", supportedTypes = {QuotaTypes.SNAPSHOT}) private Snapshot.Type snapshotType; + + @PresetVariableDefinition(description = "Type of the VM snapshot. 
Values can be: Disk or DiskAndMemory.", supportedTypes = {QuotaTypes.VM_SNAPSHOT}) private VMSnapshot.Type vmSnapshotType; + + @PresetVariableDefinition(description = "Computing offering of the VM.", supportedTypes = {QuotaTypes.RUNNING_VM, QuotaTypes.ALLOCATED_VM}) private ComputeOffering computeOffering; + + @PresetVariableDefinition(description = "Template/ISO with which the VM was created.", supportedTypes = {QuotaTypes.RUNNING_VM, QuotaTypes.ALLOCATED_VM}) private GenericPresetVariable template; + + @PresetVariableDefinition(description = "Disk offering of the volume.", supportedTypes = {QuotaTypes.VOLUME}) private GenericPresetVariable diskOffering; + + @PresetVariableDefinition(description = "Storage where the volume or snapshot is. While handling with snapshots, this value can be from the primary storage if the global " + + "setting 'snapshot.backup.to.secondary' is false, otherwise it will be from secondary storage.", supportedTypes = {QuotaTypes.VOLUME, QuotaTypes.SNAPSHOT}) private Storage storage; + + @PresetVariableDefinition(description = "Computing resources consumed by the VM.", supportedTypes = {QuotaTypes.RUNNING_VM}) private ComputingResources computingResources; + + @PresetVariableDefinition(description = "Backup offering of the backup.", supportedTypes = {QuotaTypes.BACKUP}) private BackupOffering backupOffering; + + @PresetVariableDefinition(description = "The hypervisor where the resource was deployed. Values can be: XenServer, KVM, VMware, Hyperv, BareMetal, Ovm, Ovm3 and LXC.", + supportedTypes = {QuotaTypes.RUNNING_VM, QuotaTypes.ALLOCATED_VM, QuotaTypes.VM_SNAPSHOT, QuotaTypes.SNAPSHOT}) private String hypervisorType; + + @PresetVariableDefinition(description = "The volume format. 
Values can be: RAW, VHD, VHDX, OVA and QCOW2.", supportedTypes = {QuotaTypes.VOLUME, QuotaTypes.VOLUME_SECONDARY}) private String volumeFormat; private String state; diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/constant/QuotaConfig.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/constant/QuotaConfig.java index df7ffa5c3cd..81b4643eb45 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/constant/QuotaConfig.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/constant/QuotaConfig.java @@ -48,16 +48,16 @@ public interface QuotaConfig { public static final ConfigKey QuotaSmtpPort = new ConfigKey("Advanced", String.class, "quota.usage.smtp.port", "", "Quota SMTP port.", true); - public static final ConfigKey QuotaSmtpAuthType = new ConfigKey("Advanced", String.class, "quota.usage.smtp.useAuth", "", + public static final ConfigKey QuotaSmtpAuthType = new ConfigKey("Advanced", Boolean.class, "quota.usage.smtp.useAuth", "false", "If true, use secure SMTP authentication when sending emails.", true); public static final ConfigKey QuotaSmtpSender = new ConfigKey("Advanced", String.class, "quota.usage.smtp.sender", "", "Sender of quota alert email (will be in the From header of the email).", true); public static final ConfigKey QuotaSmtpEnabledSecurityProtocols = new ConfigKey("Advanced", String.class, "quota.usage.smtp.enabledSecurityProtocols", "", - "White-space separated security protocols; ex: \"TLSv1 TLSv1.1\". Supported protocols: SSLv2Hello, SSLv3, TLSv1, TLSv1.1 and TLSv1.2.", true); + "White-space separated security protocols; ex: \"TLSv1 TLSv1.1\". 
Supported protocols: SSLv2Hello, SSLv3, TLSv1, TLSv1.1 and TLSv1.2.", true, ConfigKey.Kind.WhitespaceSeparatedListWithOptions, "SSLv2Hello,SSLv3,TLSv1,TLSv1.1,TLSv1.2"); - public static final ConfigKey QuotaSmtpUseStartTLS = new ConfigKey("Advanced", String.class, "quota.usage.smtp.useStartTLS", "false", + public static final ConfigKey QuotaSmtpUseStartTLS = new ConfigKey("Advanced", Boolean.class, "quota.usage.smtp.useStartTLS", "false", "If set to true and if we enable security via quota.usage.smtp.useAuth, this will enable StartTLS to secure the connection.", true); public static final ConfigKey QuotaActivationRuleTimeout = new ConfigKey<>("Advanced", Long.class, "quota.activationrule.timeout", "2000", "The maximum runtime," diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/constant/QuotaTypes.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/constant/QuotaTypes.java index 3ed162b2ba1..947183577a8 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/constant/QuotaTypes.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/constant/QuotaTypes.java @@ -22,6 +22,7 @@ import java.util.Map; import org.apache.cloudstack.usage.UsageTypes; import org.apache.cloudstack.usage.UsageUnitTypes; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class QuotaTypes extends UsageTypes { private final Integer quotaType; @@ -100,4 +101,13 @@ public class QuotaTypes extends UsageTypes { } return null; } + + static public QuotaTypes getQuotaType(int quotaType) { + return quotaTypeMap.get(quotaType); + } + + @Override + public String toString() { + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "quotaType", "quotaName"); + } } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDao.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDao.java index 4f13fb33180..419bb0ad7d2 100644 
--- a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDao.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDao.java @@ -28,18 +28,10 @@ public interface QuotaTariffDao extends GenericDao { Pair, Integer> listQuotaTariffs(Date startDate, Date endDate, Integer usageType, String name, String uuid, boolean listAll, Long startIndex, Long pageSize); + Pair, Integer> listQuotaTariffs(Date startDate, Date endDate, Integer usageType, String name, String uuid, boolean listAll, boolean listOnlyRemoved, Long startIndex, Long pageSize, String keyword); + QuotaTariffVO findByName(String name); - QuotaTariffVO findTariffPlanByUsageType(int quotaType, Date onOrBefore); - - Pair, Integer> listAllTariffPlans(); - - Pair, Integer> listAllTariffPlans(final Long startIndex, final Long pageSize); - - Pair, Integer> listAllTariffPlans(Date onOrBefore); - - Pair, Integer> listAllTariffPlans(Date onOrBefore, Long startIndex, Long pageSize); - Boolean updateQuotaTariff(QuotaTariffVO plan); QuotaTariffVO addQuotaTariff(QuotaTariffVO plan); diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDaoImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDaoImpl.java index 8cbec8c8598..d36c698f44d 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDaoImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDaoImpl.java @@ -16,12 +16,9 @@ //under the License. 
package org.apache.cloudstack.quota.dao; -import java.util.ArrayList; -import java.util.Collections; import java.util.Date; import java.util.List; -import org.apache.cloudstack.quota.constant.QuotaTypes; import org.apache.cloudstack.quota.vo.QuotaTariffVO; import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; @@ -34,7 +31,6 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionLegacy; -import com.cloud.utils.db.TransactionStatus; @Component public class QuotaTariffDaoImpl extends GenericDaoBase implements QuotaTariffDao { @@ -45,7 +41,7 @@ public class QuotaTariffDaoImpl extends GenericDaoBase impl public QuotaTariffDaoImpl() { super(); searchUsageType = createSearchBuilder(); - searchUsageType.and("usage_type", searchUsageType.entity().getUsageType(), SearchCriteria.Op.EQ); + searchUsageType.and("usageType", searchUsageType.entity().getUsageType(), SearchCriteria.Op.EQ); searchUsageType.done(); listAllIncludedUsageType = createSearchBuilder(); @@ -54,111 +50,28 @@ public class QuotaTariffDaoImpl extends GenericDaoBase impl listAllIncludedUsageType.done(); } - @Override - public QuotaTariffVO findTariffPlanByUsageType(final int quotaType, final Date effectiveDate) { - return Transaction.execute(TransactionLegacy.USAGE_DB, new TransactionCallback() { - @Override - public QuotaTariffVO doInTransaction(final TransactionStatus status) { - List result = new ArrayList<>(); - final Filter filter = new Filter(QuotaTariffVO.class, "updatedOn", false, 0L, 1L); - final SearchCriteria sc = listAllIncludedUsageType.create(); - sc.setParameters("onorbefore", effectiveDate); - sc.setParameters("quotatype", quotaType); - result = search(sc, filter); - if (result != null && !result.isEmpty()) { - return result.get(0); - } else { - if (logger.isDebugEnabled()) { - 
logger.debug("QuotaTariffDaoImpl::findTariffPlanByUsageType: Missing quota type " + quotaType); - } - return null; - } - } - }); - } - - @Override - public Pair, Integer> listAllTariffPlans() { - return listAllTariffPlans(null, null); - } - - @Override - public Pair, Integer> listAllTariffPlans(final Long startIndex, final Long pageSize) { - return Transaction.execute(TransactionLegacy.USAGE_DB, new TransactionCallback, Integer>>() { - @Override - public Pair, Integer> doInTransaction(final TransactionStatus status) { - return searchAndCount(null, new Filter(QuotaTariffVO.class, "updatedOn", false, startIndex, pageSize)); - } - }); - } - - - private List paginateList(final List list, final Long startIndex, final Long pageSize) { - if (startIndex == null || pageSize == null) { - return list; - } - if (list.size() < startIndex){ - return Collections.emptyList(); - } - return list.subList(startIndex.intValue(), (int) Math.min(startIndex + pageSize, list.size())); - } - - @Override - public Pair, Integer> listAllTariffPlans(final Date effectiveDate) { - return listAllTariffPlans(effectiveDate, null, null); - } - - @Override - public Pair, Integer> listAllTariffPlans(final Date effectiveDate, final Long startIndex, final Long pageSize) { - return Transaction.execute(TransactionLegacy.USAGE_DB, new TransactionCallback, Integer>>() { - @Override - public Pair, Integer> doInTransaction(final TransactionStatus status) { - List tariffs = new ArrayList(); - final Filter filter = new Filter(QuotaTariffVO.class, "updatedOn", false, 0L, 1L); - final SearchCriteria sc = listAllIncludedUsageType.create(); - sc.setParameters("onorbefore", effectiveDate); - for (Integer quotaType : QuotaTypes.listQuotaTypes().keySet()) { - sc.setParameters("quotatype", quotaType); - List result = search(sc, filter); - if (result != null && !result.isEmpty()) { - tariffs.add(result.get(0)); - if (logger.isDebugEnabled()) { - logger.debug("ListAllTariffPlans on or before " + effectiveDate + " quota 
type " + result.get(0).getUsageTypeDescription() + " , effective Date=" - + result.get(0).getEffectiveOn() + " val=" + result.get(0).getCurrencyValue()); - } - } - } - return new Pair<>(paginateList(tariffs, startIndex, pageSize), tariffs.size()); - } - }); - } - @Override public Boolean updateQuotaTariff(final QuotaTariffVO plan) { - return Transaction.execute(TransactionLegacy.USAGE_DB, new TransactionCallback() { - @Override - public Boolean doInTransaction(final TransactionStatus status) { - return update(plan.getId(), plan); - } - }); + return Transaction.execute(TransactionLegacy.USAGE_DB, (TransactionCallback) status -> update(plan.getId(), plan)); } @Override public QuotaTariffVO addQuotaTariff(final QuotaTariffVO plan) { if (plan.getIdObj() != null) { - throw new IllegalStateException("The QuotaTariffVO being added should not have an Id set "); + throw new IllegalStateException("The QuotaTariffVO being added should not have an Id set."); } - return Transaction.execute(TransactionLegacy.USAGE_DB, new TransactionCallback() { - @Override - public QuotaTariffVO doInTransaction(final TransactionStatus status) { - return persist(plan); - } - }); + return Transaction.execute(TransactionLegacy.USAGE_DB, (TransactionCallback) status -> persist(plan)); } @Override public Pair, Integer> listQuotaTariffs(Date startDate, Date endDate, Integer usageType, String name, String uuid, boolean listAll, Long startIndex, Long pageSize) { - SearchCriteria searchCriteria = createListQuotaTariffsSearchCriteria(startDate, endDate, usageType, name, uuid); + return listQuotaTariffs(startDate, endDate, usageType, name, uuid, listAll, false, startIndex, pageSize, null); + } + + @Override + public Pair, Integer> listQuotaTariffs(Date startDate, Date endDate, Integer usageType, String name, String uuid, boolean listAll, boolean listOnlyRemoved, Long startIndex, Long pageSize, String keyword) { + SearchCriteria searchCriteria = createListQuotaTariffsSearchCriteria(startDate, endDate, 
usageType, name, uuid, listOnlyRemoved, keyword); + Filter sorter = new Filter(QuotaTariffVO.class, "usageType", false, startIndex, pageSize); sorter.addOrderBy(QuotaTariffVO.class, "effectiveOn", false); sorter.addOrderBy(QuotaTariffVO.class, "updatedOn", false); @@ -166,39 +79,34 @@ public class QuotaTariffDaoImpl extends GenericDaoBase impl return Transaction.execute(TransactionLegacy.USAGE_DB, (TransactionCallback, Integer>>) status -> searchAndCount(searchCriteria, sorter, listAll)); } - protected SearchCriteria createListQuotaTariffsSearchCriteria(Date startDate, Date endDate, Integer usageType, String name, String uuid) { - SearchCriteria searchCriteria = createListQuotaTariffsSearchBuilder(startDate, endDate, usageType, name, uuid).create(); + protected SearchCriteria createListQuotaTariffsSearchCriteria(Date startDate, Date endDate, Integer usageType, String name, String uuid, boolean listOnlyRemoved, String keyword) { + SearchCriteria searchCriteria = createListQuotaTariffsSearchBuilder(listOnlyRemoved).create(); - searchCriteria.setParametersIfNotNull("start_date", startDate); - searchCriteria.setParametersIfNotNull("end_date", endDate); - searchCriteria.setParametersIfNotNull("usage_type", usageType); + searchCriteria.setParametersIfNotNull("startDate", startDate); + searchCriteria.setParametersIfNotNull("endDate", endDate); + searchCriteria.setParametersIfNotNull("usageType", usageType); searchCriteria.setParametersIfNotNull("name", name); searchCriteria.setParametersIfNotNull("uuid", uuid); + if (keyword != null) { + searchCriteria.setParameters("nameLike", "%" + keyword + "%"); + } + return searchCriteria; } - protected SearchBuilder createListQuotaTariffsSearchBuilder(Date startDate, Date endDate, Integer usageType, String name, String uuid) { + protected SearchBuilder createListQuotaTariffsSearchBuilder(boolean listOnlyRemoved) { SearchBuilder searchBuilder = createSearchBuilder(); - if (startDate != null) { - searchBuilder.and("start_date", 
searchBuilder.entity().getEffectiveOn(), SearchCriteria.Op.GTEQ); - } + searchBuilder.and("startDate", searchBuilder.entity().getEffectiveOn(), SearchCriteria.Op.GTEQ); + searchBuilder.and("endDate", searchBuilder.entity().getEndDate(), SearchCriteria.Op.LTEQ); + searchBuilder.and("usageType", searchBuilder.entity().getUsageType(), SearchCriteria.Op.EQ); + searchBuilder.and("name", searchBuilder.entity().getName(), SearchCriteria.Op.EQ); + searchBuilder.and("uuid", searchBuilder.entity().getUuid(), SearchCriteria.Op.EQ); + searchBuilder.and("nameLike", searchBuilder.entity().getName(), SearchCriteria.Op.LIKE); - if (endDate != null) { - searchBuilder.and("end_date", searchBuilder.entity().getEndDate(), SearchCriteria.Op.LTEQ); - } - - if (usageType != null) { - searchBuilder.and("usage_type", searchBuilder.entity().getUsageType(), SearchCriteria.Op.EQ); - } - - if (name != null) { - searchBuilder.and("name", searchBuilder.entity().getName(), SearchCriteria.Op.EQ); - } - - if (uuid != null) { - searchBuilder.and("uuid", searchBuilder.entity().getUuid(), SearchCriteria.Op.EQ); + if (listOnlyRemoved) { + searchBuilder.and("removed", searchBuilder.entity().getRemoved(), SearchCriteria.Op.NNULL); } return searchBuilder; diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/vo/QuotaTariffVO.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/vo/QuotaTariffVO.java index 40a751c6200..bd6aeb13418 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/vo/QuotaTariffVO.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/vo/QuotaTariffVO.java @@ -93,6 +93,10 @@ public class QuotaTariffVO implements QuotaTariff { @Temporal(value = TemporalType.TIMESTAMP) private Date endDate; + @Column(name = "position") + protected Integer position; + + public QuotaTariffVO() { } @@ -120,6 +124,7 @@ public class QuotaTariffVO implements QuotaTariff { this.setDescription(that.getDescription()); 
this.setActivationRule(that.getActivationRule()); this.setEndDate(that.getEndDate()); + this.setPosition(that.getPosition()); } public void setId(Long id) { @@ -263,6 +268,15 @@ public class QuotaTariffVO implements QuotaTariff { return true; } + public Integer getPosition() { + return position; + } + + public void setPosition(Integer position) { + this.position = position; + } + + @Override public String toString() { return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "name", "usageName"); diff --git a/framework/quota/src/test/java/org/apache/cloudstack/quota/QuotaManagerImplTest.java b/framework/quota/src/test/java/org/apache/cloudstack/quota/QuotaManagerImplTest.java index e53051f2925..5dfc12f7ef8 100644 --- a/framework/quota/src/test/java/org/apache/cloudstack/quota/QuotaManagerImplTest.java +++ b/framework/quota/src/test/java/org/apache/cloudstack/quota/QuotaManagerImplTest.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.quota.activationrule.presetvariables.Domain; import org.apache.cloudstack.quota.activationrule.presetvariables.GenericPresetVariable; import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariableHelper; import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariables; +import org.apache.cloudstack.quota.activationrule.presetvariables.Tariff; import org.apache.cloudstack.quota.activationrule.presetvariables.Value; import org.apache.cloudstack.quota.constant.QuotaTypes; import org.apache.cloudstack.quota.dao.QuotaTariffDao; @@ -395,7 +396,7 @@ public class QuotaManagerImplTest { Mockito.doReturn(null).when(quotaTariffVoMock).getActivationRule(); Mockito.doReturn(BigDecimal.ONE).when(quotaTariffVoMock).getCurrencyValue(); - BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, null, null); + BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, null, null, null); Assert.assertEquals(BigDecimal.ONE, result); } 
@@ -405,7 +406,7 @@ public class QuotaManagerImplTest { Mockito.doReturn("").when(quotaTariffVoMock).getActivationRule(); Mockito.doReturn(BigDecimal.TEN).when(quotaTariffVoMock).getCurrencyValue(); - BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, null, null); + BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, null, null, null); Assert.assertEquals(BigDecimal.TEN, result); } @@ -413,13 +414,15 @@ public class QuotaManagerImplTest { @Test public void getQuotaTariffValueToBeAppliedTestScriptResultIsNumberReturnIt() { BigDecimal expected = new BigDecimal(50.1); + List lastTariffs = createLastAppliedTariffsPresetVariableList(0); + Mockito.doReturn(" ").when(quotaTariffVoMock).getActivationRule(); Mockito.doReturn(BigDecimal.TEN).when(quotaTariffVoMock).getCurrencyValue(); Mockito.doNothing().when(quotaManagerImplSpy).injectPresetVariablesIntoJsInterpreter(Mockito.any(), Mockito.any()); Mockito.doReturn(expected).when(jsInterpreterMock).executeScript(Mockito.anyString()); - BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, jsInterpreterMock, presetVariablesMock); + BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, jsInterpreterMock, presetVariablesMock, lastTariffs); Assert.assertEquals(expected, result); } @@ -427,37 +430,42 @@ public class QuotaManagerImplTest { @Test public void getQuotaTariffValueToBeAppliedTestScriptResultIsTrueReturnTariffValue() { BigDecimal expected = new BigDecimal(236.84); + List lastTariffs = createLastAppliedTariffsPresetVariableList(0); Mockito.doReturn(" ").when(quotaTariffVoMock).getActivationRule(); Mockito.doReturn(expected).when(quotaTariffVoMock).getCurrencyValue(); Mockito.doNothing().when(quotaManagerImplSpy).injectPresetVariablesIntoJsInterpreter(Mockito.any(), Mockito.any()); Mockito.doReturn(true).when(jsInterpreterMock).executeScript(Mockito.anyString()); - BigDecimal 
result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, jsInterpreterMock, presetVariablesMock); + BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, jsInterpreterMock, presetVariablesMock, lastTariffs); Assert.assertEquals(expected, result); } @Test public void getQuotaTariffValueToBeAppliedTestScriptResultIsFalseReturnZero() { + List lastTariffs = createLastAppliedTariffsPresetVariableList(0); + Mockito.doReturn(" ").when(quotaTariffVoMock).getActivationRule(); Mockito.doReturn(BigDecimal.TEN).when(quotaTariffVoMock).getCurrencyValue(); Mockito.doNothing().when(quotaManagerImplSpy).injectPresetVariablesIntoJsInterpreter(Mockito.any(), Mockito.any()); Mockito.doReturn(false).when(jsInterpreterMock).executeScript(Mockito.anyString()); - BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, jsInterpreterMock, presetVariablesMock); + BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, jsInterpreterMock, presetVariablesMock, lastTariffs); Assert.assertEquals(BigDecimal.ZERO, result); } @Test public void getQuotaTariffValueToBeAppliedTestScriptResultIsNotBooleanNorNumericReturnZero() { + List lastTariffs = createLastAppliedTariffsPresetVariableList(0); + Mockito.doReturn(" ").when(quotaTariffVoMock).getActivationRule(); Mockito.doReturn(BigDecimal.TEN).when(quotaTariffVoMock).getCurrencyValue(); Mockito.doNothing().when(quotaManagerImplSpy).injectPresetVariablesIntoJsInterpreter(Mockito.any(), Mockito.any()); Mockito.doReturn("test").when(jsInterpreterMock).executeScript(Mockito.anyString()); - BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, jsInterpreterMock, presetVariablesMock); + BigDecimal result = quotaManagerImplSpy.getQuotaTariffValueToBeApplied(quotaTariffVoMock, jsInterpreterMock, presetVariablesMock, lastTariffs); Assert.assertEquals(BigDecimal.ZERO, result); } @@ -477,10 +485,7 @@ 
public class QuotaManagerImplTest { @Test public void aggregateQuotaTariffsValuesTestTariffsWereNotInPeriodToBeAppliedReturnZero() { - List tariffs = new ArrayList<>(); - tariffs.add(new QuotaTariffVO()); - tariffs.add(new QuotaTariffVO()); - tariffs.add(new QuotaTariffVO()); + List tariffs = createTariffList(); Mockito.doReturn(false).when(quotaManagerImplSpy).isQuotaTariffInPeriodToBeApplied(Mockito.any(), Mockito.any(), Mockito.anyString()); BigDecimal result = quotaManagerImplSpy.aggregateQuotaTariffsValues(usageVoMock, tariffs, false, jsInterpreterMock, ""); @@ -497,13 +502,10 @@ public class QuotaManagerImplTest { @Test public void aggregateQuotaTariffsValuesTestTariffsAreInPeriodToBeAppliedReturnAggregation() { - List tariffs = new ArrayList<>(); - tariffs.add(new QuotaTariffVO()); - tariffs.add(new QuotaTariffVO()); - tariffs.add(new QuotaTariffVO()); + List tariffs = createTariffList(); Mockito.doReturn(true, false, true).when(quotaManagerImplSpy).isQuotaTariffInPeriodToBeApplied(Mockito.any(), Mockito.any(), Mockito.anyString()); - Mockito.doReturn(BigDecimal.TEN).when(quotaManagerImplSpy).getQuotaTariffValueToBeApplied(Mockito.any(), Mockito.any(), Mockito.any()); + Mockito.doReturn(BigDecimal.TEN).when(quotaManagerImplSpy).getQuotaTariffValueToBeApplied(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); BigDecimal result = quotaManagerImplSpy.aggregateQuotaTariffsValues(usageVoMock, tariffs, false, jsInterpreterMock, ""); Assert.assertEquals(BigDecimal.TEN.multiply(new BigDecimal(2)), result); @@ -528,4 +530,25 @@ public class QuotaManagerImplTest { Assert.assertEquals(quotaUsageVoMock1, result.get(0)); Assert.assertEquals(quotaUsageVoMock2, result.get(1)); } + + private static List createTariffList() { + List tariffs = new ArrayList<>(); + tariffs.add(new QuotaTariffVO()); + tariffs.add(new QuotaTariffVO()); + tariffs.add(new QuotaTariffVO()); + tariffs.forEach(quotaTariffVO -> quotaTariffVO.setPosition(1)); + return tariffs; + } + + private 
static List createLastAppliedTariffsPresetVariableList(int numberOfTariffs) { + List lastTariffs = new ArrayList<>(); + for (int i = 0; i < numberOfTariffs; i++) { + Tariff tariff = new Tariff(); + tariff.setId(String.valueOf(i)); + tariff.setValue(BigDecimal.valueOf(i)); + lastTariffs.add(tariff); + } + return lastTariffs; + } + } diff --git a/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/ValueTest.java b/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/ValueTest.java index 4d0162b33c9..bad33da8836 100644 --- a/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/ValueTest.java +++ b/framework/quota/src/test/java/org/apache/cloudstack/quota/activationrule/presetvariables/ValueTest.java @@ -25,6 +25,20 @@ import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class ValueTest { + @Test + public void setIdTestAddFieldIdToCollection() { + Value variable = new Value(); + variable.setId(null); + Assert.assertTrue(variable.fieldNamesToIncludeInToString.contains("id")); + } + + @Test + public void setNameTestAddFieldNameToCollection() { + Value variable = new Value(); + variable.setName(null); + Assert.assertTrue(variable.fieldNamesToIncludeInToString.contains("name")); + } + @Test public void setHostTestAddFieldHostToCollection() { Value variable = new Value(); diff --git a/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManager.java b/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManager.java index c44347c85ef..18b840e7a8c 100644 --- a/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManager.java +++ b/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManager.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.framework.security.keystore; import 
com.cloud.agent.api.LogLevel; import com.cloud.agent.api.LogLevel.Log4jLevel; +import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; public interface KeystoreManager extends Manager { @@ -59,7 +60,7 @@ public interface KeystoreManager extends Manager { } } - boolean validateCertificate(String certificate, String key, String domainSuffix); + Pair validateCertificate(String certificate, String key, String domainSuffix); void saveCertificate(String name, String certificate, String key, String domainSuffix); diff --git a/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java b/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java index 3fc2ff3702e..3e01942fb2b 100644 --- a/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java +++ b/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java @@ -30,6 +30,7 @@ import java.util.regex.Pattern; import javax.inject.Inject; +import com.cloud.utils.Pair; import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Component; @@ -45,24 +46,28 @@ public class KeystoreManagerImpl extends ManagerBase implements KeystoreManager private KeystoreDao _ksDao; @Override - public boolean validateCertificate(String certificate, String key, String domainSuffix) { + public Pair validateCertificate(String certificate, String key, String domainSuffix) { + String errMsg = null; if (StringUtils.isAnyEmpty(certificate, key, domainSuffix)) { - logger.error("Invalid parameter found in (certificate, key, domainSuffix) tuple for domain: " + domainSuffix); - return false; + errMsg = String.format("Invalid parameter found in (certificate, key, domainSuffix) tuple for domain: %s", domainSuffix); + logger.error(errMsg); + return new Pair<>(false, errMsg); } try { String ksPassword = "passwordForValidation"; 
byte[] ksBits = CertificateHelper.buildAndSaveKeystore(domainSuffix, certificate, getKeyContent(key), ksPassword); KeyStore ks = CertificateHelper.loadKeystore(ksBits, ksPassword); - if (ks != null) - return true; - - logger.error("Unabled to construct keystore for domain: " + domainSuffix); + if (ks != null) { + return new Pair<>(true, errMsg); + } + errMsg = String.format("Unable to construct keystore for domain: %s", domainSuffix); + logger.error(errMsg); } catch (Exception e) { - logger.error("Certificate validation failed due to exception for domain: " + domainSuffix, e); + errMsg = String.format("Certificate validation failed due to exception for domain: %s", domainSuffix); + logger.error(errMsg, e); } - return false; + return new Pair<>(false, errMsg); } @Override diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java index 15c1ccac5d9..b913033259c 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java @@ -69,7 +69,12 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { with(new WithComponentLifeCycle() { @Override public void with(ComponentLifecycle lifecycle) { - lifecycle.start(); + logger.info("starting bean {}.", lifecycle.getName()); + try { + lifecycle.start(); + } catch (Exception e) { + logger.error("Error on starting bean {} - {}", lifecycle.getName(), e.getMessage(), e); + } if (lifecycle instanceof ManagementBean) { ManagementBean mbean = (ManagementBean)lifecycle; @@ -93,13 +98,21 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { } public void stopBeans() { + logger.info("Stopping CloudStack Components"); + with(new 
WithComponentLifeCycle() { @Override public void with(ComponentLifecycle lifecycle) { - logger.info("stopping bean " + lifecycle.getName()); - lifecycle.stop(); + logger.info("stopping bean {}.", lifecycle.getName()); + try { + lifecycle.stop(); + } catch (Exception e) { + logger.error("Error on stopping bean {} - {}", lifecycle.getName(), e.getMessage(), e); + } } }); + + logger.info("Done Stopping CloudStack Components"); } private void configure() { @@ -109,10 +122,13 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { @Override public void with(ComponentLifecycle lifecycle) { try { + logger.info("configuring bean {}.", lifecycle.getName()); lifecycle.configure(lifecycle.getName(), lifecycle.getConfigParams()); } catch (ConfigurationException e) { logger.error("Failed to configure " + lifecycle.getName(), e); throw new CloudRuntimeException(e); + } catch (Exception e) { + logger.error("Error on configuring bean {} - {}", lifecycle.getName(), e.getMessage(), e); } } }); diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycleStart.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycleStart.java index 0dc72f93195..85d25ffd9a4 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycleStart.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycleStart.java @@ -45,5 +45,4 @@ public class CloudStackExtendedLifeCycleStart extends AbstractSmartLifeCycle imp public void run() { lifeCycle.startBeans(); } - } diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java index 8bbbc35f7e5..361a66fe1ba 100644 --- 
a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java @@ -77,8 +77,10 @@ public class CloudStackSpringContext { for (String appName : contextMap.keySet()) { ApplicationContext contex = contextMap.get(appName); if (contex instanceof ConfigurableApplicationContext) { - logger.trace("registering shutdown hook for bean "+ appName); + logger.trace("Registering shutdown hook for bean {}.", appName); ((ConfigurableApplicationContext)contex).registerShutdownHook(); + } else { + logger.warn("Shutdown hook not registered for bean {}.", appName); } } } diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java index d61e26fc3a8..2a6d0b63e5c 100644 --- a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java @@ -102,7 +102,9 @@ public class DefaultModuleDefinitionSet implements ModuleDefinitionSet { logger.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); ApplicationContext context = getApplicationContext(moduleDefinitionName); try { - if (context.containsBean("moduleStartup")) { + if (context == null) { + logger.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName)); + } else if (context.containsBean("moduleStartup")) { Runnable runnable = context.getBean("moduleStartup", Runnable.class); logger.info(String.format("Starting module [%s].", moduleDefinitionName)); runnable.run(); diff --git 
a/framework/spring/module/src/test/resources/testhierarchy/base/test-context-inheritable.xml b/framework/spring/module/src/test/resources/testhierarchy/base/test-context-inheritable.xml index d15e8a97919..ac219a531bb 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/base/test-context-inheritable.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/base/test-context-inheritable.xml @@ -24,5 +24,5 @@ http://www.springframework.org/schema/context/spring-context.xsd"> - +
    diff --git a/framework/spring/module/src/test/resources/testhierarchy/base/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/base/test-context.xml index b79711e1f53..a6342820b46 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/base/test-context.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/base/test-context.xml @@ -26,9 +26,9 @@ - + - + diff --git a/framework/spring/module/src/test/resources/testhierarchy/child1-1/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/child1-1/test-context.xml index 4e8f2db8f9e..9c63227162f 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/child1-1/test-context.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/child1-1/test-context.xml @@ -26,9 +26,9 @@ - + - + diff --git a/framework/spring/module/src/test/resources/testhierarchy/child1/test-context-override.xml b/framework/spring/module/src/test/resources/testhierarchy/child1/test-context-override.xml index 0ea8893100f..59820de453c 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/child1/test-context-override.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/child1/test-context-override.xml @@ -22,9 +22,9 @@ http://www.springframework.org/schema/beans/spring-beans.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd"> - + - + diff --git a/framework/spring/module/src/test/resources/testhierarchy/child1/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/child1/test-context.xml index 98b663ebf84..ecaef681bef 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/child1/test-context.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/child1/test-context.xml @@ -28,11 +28,11 @@ - + - + - + diff --git a/framework/spring/module/src/test/resources/testhierarchy/child2/test-context.xml 
b/framework/spring/module/src/test/resources/testhierarchy/child2/test-context.xml index 1c4404649eb..e3577b52f28 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/child2/test-context.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/child2/test-context.xml @@ -26,7 +26,7 @@ - + diff --git a/framework/spring/module/src/test/resources/testhierarchy/excluded/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/excluded/test-context.xml index 2cd049c88bf..7d069ff61ad 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/excluded/test-context.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/excluded/test-context.xml @@ -26,7 +26,7 @@ - + diff --git a/framework/spring/module/src/test/resources/testhierarchy/excluded2/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/excluded2/test-context.xml index 192a376e9a9..aaff5b4ed1e 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/excluded2/test-context.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/excluded2/test-context.xml @@ -26,7 +26,7 @@ - + diff --git a/framework/spring/module/src/test/resources/testhierarchy/orphan-of-excluded/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/orphan-of-excluded/test-context.xml index 08bd33c52bf..7000fb436f3 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/orphan-of-excluded/test-context.xml +++ b/framework/spring/module/src/test/resources/testhierarchy/orphan-of-excluded/test-context.xml @@ -26,7 +26,7 @@ - + diff --git a/framework/spring/module/src/test/resources/testhierarchy/orphan1/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/orphan1/test-context.xml index 38e0e326f37..d422ecdd79c 100644 --- a/framework/spring/module/src/test/resources/testhierarchy/orphan1/test-context.xml +++ 
b/framework/spring/module/src/test/resources/testhierarchy/orphan1/test-context.xml @@ -26,5 +26,5 @@ - + diff --git a/packaging/README.md b/packaging/README.md index 08e34baeae2..78057b828b2 100644 --- a/packaging/README.md +++ b/packaging/README.md @@ -6,9 +6,9 @@ These scripts are also used by the CloudStack team to build packages for the off # Requirements The RPM and DEB packages have dependencies on versions of specific libraries. Due to these dependencies the following distributions and their versions are supported by the packages. -* CentOS / RHEL: 7 and 8 -* Debian 7 (Wheezy) and 8 (Jessy) (untested!) -* Ubuntu: 16.04 (Xenial), 18.04 (Bionic) and 20.04 (Focal) +* CentOS / RHEL: 8 and 9 +* Ubuntu: 20.04, 22.04, 24.04 +* Debian 12 (Bookworm, untested!) # Building Using the scripts in the *packaging* directory the RPM and DEB packages can be build. @@ -38,4 +38,4 @@ The commands above will generate Ubuntu 14.04, 16.04, and 22.04 packages which y ## RPM The *package.sh* script can be used to build RPM packages for CloudStack. In the *packaging* script you can run the following command: -``./package.sh --pack oss --distribution centos7`` +``./package.sh --pack oss --distribution el8`` diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec deleted file mode 100644 index 99ecca784ad..00000000000 --- a/packaging/centos7/cloud.spec +++ /dev/null @@ -1,721 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -%define __os_install_post %{nil} -%global debug_package %{nil} - -# DISABLE the post-percentinstall java repacking and line number stripping -# we need to find a way to just disable the java repacking and line number stripping, but not the autodeps - -Name: cloudstack -Summary: CloudStack IaaS Platform -#http://fedoraproject.org/wiki/PackageNamingGuidelines#Pre-Release_packages -%define _maventag %{_fullver} -Release: %{_rel}%{dist} - -%define __python python3 -%{!?python_sitearch: %define python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} - -Version: %{_ver} -License: ASL 2.0 -Vendor: Apache CloudStack -Packager: Apache CloudStack -Group: System Environment/Libraries -# FIXME do groups for every single one of the subpackages -Source0: %{name}-%{_maventag}.tgz -BuildRoot: %{_tmppath}/%{name}-%{_maventag}-%{release}-build - -BuildRequires: java-11-openjdk-devel -BuildRequires: ws-commons-util -BuildRequires: jpackage-utils -BuildRequires: gcc -BuildRequires: glibc-devel -BuildRequires: /usr/bin/mkisofs -BuildRequires: mysql-connector-python -BuildRequires: maven => 3.0.0 -BuildRequires: python-setuptools -BuildRequires: python3 -BuildRequires: python3-pip -BuildRequires: python3-setuptools -BuildRequires: wget -BuildRequires: nodejs - -%description -CloudStack is a highly-scalable elastic, open source, -intelligent IaaS cloud implementation. 
- -%package management -Summary: CloudStack management server UI -Requires: java-11-openjdk -Requires: tzdata-java -Requires: python -Requires: python3 -Requires: bash -Requires: gawk -Requires: which -Requires: file -Requires: bzip2 -Requires: gzip -Requires: unzip -Requires: /sbin/mount.nfs -Requires: openssh-clients -Requires: nfs-utils -Requires: iproute -Requires: wget -Requires: mysql -Requires: sudo -Requires: /sbin/service -Requires: /sbin/chkconfig -Requires: /usr/bin/ssh-keygen -Requires: genisoimage -Requires: ipmitool -Requires: %{name}-common = %{_ver} -Requires: iptables-services -Requires: qemu-img -Requires: rng-tools -Requires: python3-pip -Requires: python3-setuptools -Group: System Environment/Libraries -%description management -The CloudStack management server is the central point of coordination, -management, and intelligence in CloudStack. - -%package common -Summary: Apache CloudStack common files and scripts -Requires: python -Requires: python3 -Requires: python3-pip -Group: System Environment/Libraries -%description common -The Apache CloudStack files shared between agent and management server -%global __requires_exclude ^libuuid\\.so\\.1$ - -%package agent -Summary: CloudStack Agent for KVM hypervisors -Requires: openssh-clients -Requires: java-11-openjdk -Requires: tzdata-java -Requires: %{name}-common = %{_ver} -Requires: libvirt -Requires: bridge-utils -Requires: ebtables -Requires: iptables -Requires: ethtool -Requires: net-tools -Requires: iproute -Requires: ipset -Requires: perl -Requires: python36-libvirt -Requires: qemu-img -Requires: qemu-kvm -Requires: cryptsetup -Requires: rng-tools -Provides: cloud-agent -Group: System Environment/Libraries -%description agent -The CloudStack agent for KVM hypervisors - -%package baremetal-agent -Summary: CloudStack baremetal agent -Requires: tftp-server -Requires: xinetd -Requires: syslinux -Requires: chkconfig -Requires: dhcp -Requires: httpd -Group: System Environment/Libraries -%description 
baremetal-agent -The CloudStack baremetal agent - -%package usage -Summary: CloudStack Usage calculation server -Requires: java-11-openjdk -Requires: tzdata-java -Group: System Environment/Libraries -%description usage -The CloudStack usage calculation service - -%package ui -Summary: CloudStack UI -Group: System Environment/Libraries -%description ui -The CloudStack UI - -%package cli -Summary: Apache CloudStack CLI -Provides: python-marvin -Group: System Environment/Libraries -%description cli -Apache CloudStack command line interface - -%package marvin -Summary: Apache CloudStack Marvin library -Requires: python3 -Requires: python3-devel -Requires: python3-pip -Requires: python-pip -Requires: gcc -Requires: python-devel -Requires: libffi-devel -Requires: openssl-devel -Group: System Environment/Libraries -%description marvin -Apache CloudStack Marvin library - -%package integration-tests -Summary: Apache CloudStack Marvin integration tests -Requires: %{name}-marvin = %{_ver} -Group: System Environment/Libraries -%description integration-tests -Apache CloudStack Marvin integration tests - -%if "%{_ossnoss}" == "noredist" -%package mysql-ha -Summary: Apache CloudStack Balancing Strategy for MySQL -Group: System Environmnet/Libraries -%description mysql-ha -Apache CloudStack Balancing Strategy for MySQL - -%endif - -%prep -echo Doing CloudStack build - -%setup -q -n %{name}-%{_maventag} - -%build - -cp packaging/centos7/replace.properties build/replace.properties -echo VERSION=%{_maventag} >> build/replace.properties -echo PACKAGE=%{name} >> build/replace.properties -touch build/gitrev.txt -echo $(git rev-parse HEAD) > build/gitrev.txt - -if [ "%{_ossnoss}" == "NOREDIST" -o "%{_ossnoss}" == "noredist" ] ; then - echo "Adding noredist flag to the maven build" - FLAGS="$FLAGS -Dnoredist" -fi - -if [ "%{_sim}" == "SIMULATOR" -o "%{_sim}" == "simulator" ] ; then - echo "Adding simulator flag to the maven build" - FLAGS="$FLAGS -Dsimulator" -fi - -if [ \"%{_temp}\" != 
"" ]; then - echo "Adding flags to package requested templates" - FLAGS="$FLAGS `rpm --eval %{?_temp}`" -fi - -mvn -Psystemvm,developer $FLAGS clean package -cd ui && npm install && npm run build && cd .. - -%install -[ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT} -# Common directories -mkdir -p ${RPM_BUILD_ROOT}%{_bindir} -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/agent -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/ipallocator -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/cache/%{name}/management/work -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/cache/%{name}/management/temp -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/%{name}/mnt -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/%{name}/management -mkdir -p ${RPM_BUILD_ROOT}%{_initrddir} -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/default -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/profile.d -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sudoers.d - -# Common -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/vms -mkdir -p ${RPM_BUILD_ROOT}%{python_sitearch}/ -mkdir -p ${RPM_BUILD_ROOT}/usr/bin -cp -r scripts/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts -install -D systemvm/dist/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/vms/ -install python/lib/cloud_utils.py ${RPM_BUILD_ROOT}%{python_sitearch}/cloud_utils.py -cp -r python/lib/cloudutils ${RPM_BUILD_ROOT}%{python_sitearch}/ -python3 -m py_compile ${RPM_BUILD_ROOT}%{python_sitearch}/cloud_utils.py -python3 -m compileall ${RPM_BUILD_ROOT}%{python_sitearch}/cloudutils -cp build/gitrev.txt ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts -cp packaging/centos7/cloudstack-sccs ${RPM_BUILD_ROOT}/usr/bin - -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts/network/cisco -cp -r plugins/network-elements/cisco-vnmc/src/main/scripts/network/cisco/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts/network/cisco - -# Management -mkdir -p 
${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/ -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/lib -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/management -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/run -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel - -# Setup Jetty -ln -sf /etc/%{name}/management ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/conf -ln -sf /var/log/%{name}/management ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/logs - -install -D client/target/utilities/bin/cloud-migrate-databases ${RPM_BUILD_ROOT}%{_bindir}/%{name}-migrate-databases -install -D client/target/utilities/bin/cloud-set-guest-password ${RPM_BUILD_ROOT}%{_bindir}/%{name}-set-guest-password -install -D client/target/utilities/bin/cloud-set-guest-sshkey ${RPM_BUILD_ROOT}%{_bindir}/%{name}-set-guest-sshkey -install -D client/target/utilities/bin/cloud-setup-databases ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-databases -install -D client/target/utilities/bin/cloud-setup-encryption ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-encryption -install -D client/target/utilities/bin/cloud-setup-management ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-management -install -D client/target/utilities/bin/cloud-setup-baremetal ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-baremetal -install -D client/target/utilities/bin/cloud-sysvmadm ${RPM_BUILD_ROOT}%{_bindir}/%{name}-sysvmadm -install -D client/target/utilities/bin/cloud-update-xenserver-licenses ${RPM_BUILD_ROOT}%{_bindir}/%{name}-update-xenserver-licenses -# Bundle cmk in cloudstack-management -wget https://github.com/apache/cloudstack-cloudmonkey/releases/download/6.3.0/cmk.linux.x86-64 -O ${RPM_BUILD_ROOT}%{_bindir}/cmk -chmod +x ${RPM_BUILD_ROOT}%{_bindir}/cmk - -cp -r client/target/utilities/scripts/db/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup - -cp -r 
client/target/cloud-client-ui-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/ -cp -r client/target/classes/META-INF/webapp ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapp -cp ui/dist/config.json ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/ -cp -r ui/dist/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapp/ -rm -f ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapp/config.json -ln -sf /etc/%{name}/management/config.json ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapp/config.json -mv ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/cloud-client-ui-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/lib/cloudstack-%{_maventag}.jar -cp client/target/lib/*jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/lib/ - -# Don't package the scripts in the management webapp -rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/scripts -rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/vms - -for name in db.properties server.properties log4j-cloud.xml environment.properties java.security.ciphers -do - cp client/target/conf/$name ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/$name -done - -ln -sf log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/log4j2.xml - -install python/bindir/cloud-external-ipallocator.py ${RPM_BUILD_ROOT}%{_bindir}/%{name}-external-ipallocator.py -install -D client/target/pythonlibs/jasypt-1.9.3.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/lib/jasypt-1.9.3.jar -install -D utils/target/cloud-utils-%{_maventag}-bundled.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/lib/%{name}-utils.jar - -install -D packaging/centos7/cloud-ipallocator.rc ${RPM_BUILD_ROOT}%{_initrddir}/%{name}-ipallocator -install -D packaging/centos7/cloud.limits ${RPM_BUILD_ROOT}%{_sysconfdir}/security/limits.d/cloud -install -D packaging/systemd/cloudstack-management.service ${RPM_BUILD_ROOT}%{_unitdir}/%{name}-management.service -install 
-D packaging/systemd/cloudstack-management.default ${RPM_BUILD_ROOT}%{_sysconfdir}/default/%{name}-management -install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir}/sudoers.d/%{name}-management -touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid -#install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina - -# SystemVM template -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm -cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm -rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt - -# UI -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ -cp -r client/target/classes/META-INF/webapp/WEB-INF ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui -cp ui/dist/config.json ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui/ -cp -r ui/dist/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ -rm -f ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/config.json -ln -sf /etc/%{name}/ui/config.json ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/config.json - -# Package mysql-connector-python -wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/ee/ff/48bde5c0f013094d729fe4b0316ba2a24774b3ff1c52d924a8a4cb04078a/six-1.15.0-py2.py3-none-any.whl -wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/e9/93/4860cebd5ad3ff2664ad3c966490ccb46e3b88458b2095145bca11727ca4/setuptools-47.3.1-py3-none-any.whl -wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel https://files.pythonhosted.org/packages/28/05/9867ef8eafd12265267bee138fa2c46ebf34a276ea4cbe184cba4c606e8b/protobuf-3.12.2-cp36-cp36m-manylinux1_x86_64.whl -wget -P ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup/wheel 
https://files.pythonhosted.org/packages/d1/53/4cf90d2fe81b9cdb55dc180951bcec44ea8685665f1bdb1412501dc362dd/mysql_connector_python-8.0.20-cp36-cp36m-manylinux1_x86_64.whl - -chmod 440 ${RPM_BUILD_ROOT}%{_sysconfdir}/sudoers.d/%{name}-management -chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/%{name}/mnt -chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/%{name}/management -chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/cache/%{name}/management/work -chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/cache/%{name}/management/temp -chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/management -chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/agent - -# KVM Agent -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/agent -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/plugins -install -D packaging/systemd/cloudstack-agent.service ${RPM_BUILD_ROOT}%{_unitdir}/%{name}-agent.service -install -D packaging/systemd/cloudstack-rolling-maintenance@.service ${RPM_BUILD_ROOT}%{_unitdir}/%{name}-rolling-maintenance@.service -install -D packaging/systemd/cloudstack-agent.default ${RPM_BUILD_ROOT}%{_sysconfdir}/default/%{name}-agent -install -D agent/target/transformed/agent.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/agent.properties -install -D agent/target/transformed/environment.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/environment.properties -install -D agent/target/transformed/log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/log4j-cloud.xml -install -D agent/target/transformed/cloud-setup-agent ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-agent -install -D agent/target/transformed/cloudstack-agent-upgrade ${RPM_BUILD_ROOT}%{_bindir}/%{name}-agent-upgrade -install -D agent/target/transformed/cloud-guest-tool ${RPM_BUILD_ROOT}%{_bindir}/%{name}-guest-tool -install -D agent/target/transformed/libvirtqemuhook 
${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib/libvirtqemuhook -install -D agent/target/transformed/rolling-maintenance ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib/rolling-maintenance -install -D agent/target/transformed/cloud-ssh ${RPM_BUILD_ROOT}%{_bindir}/%{name}-ssh -install -D agent/target/transformed/cloudstack-agent-profile.sh ${RPM_BUILD_ROOT}%{_sysconfdir}/profile.d/%{name}-agent-profile.sh -install -D agent/target/transformed/cloudstack-agent.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-agent -install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar -cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib -cp plugins/storage/volume/storpool/target/*.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib -cp plugins/storage/volume/linstor/target/*.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib - -# Usage server -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-usage/lib -install -D usage/target/cloud-usage-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-usage/cloud-usage-%{_maventag}.jar -install -D usage/target/transformed/db.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage/db.properties -install -D usage/target/transformed/log4j-cloud_usage.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage/log4j-cloud.xml -cp usage/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-usage/lib/ -cp client/target/lib/mysql*jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-usage/lib/ -install -D packaging/systemd/cloudstack-usage.service ${RPM_BUILD_ROOT}%{_unitdir}/%{name}-usage.service -install -D packaging/systemd/cloudstack-usage.default ${RPM_BUILD_ROOT}%{_sysconfdir}/default/%{name}-usage -mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/usage/ - -# CLI -cp -r cloud-cli/cloudtool ${RPM_BUILD_ROOT}%{python_sitearch}/ 
-install cloud-cli/cloudapis/cloud.py ${RPM_BUILD_ROOT}%{python_sitearch}/cloudapis.py - -# Marvin -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-marvin -cp tools/marvin/dist/Marvin-*.tar.gz ${RPM_BUILD_ROOT}%{_datadir}/%{name}-marvin/ - -# integration-tests -mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-integration-tests -cp -r test/integration/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-integration-tests/ - -# MYSQL HA -if [ "x%{_ossnoss}" == "xnoredist" ] ; then - mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-mysql-ha/lib - cp -r plugins/database/mysql-ha/target/cloud-plugin-database-mysqlha-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-mysql-ha/lib -fi - -#License files from whisker -install -D tools/whisker/NOTICE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-management-%{version}/NOTICE -install -D tools/whisker/LICENSE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-management-%{version}/LICENSE -install -D tools/whisker/NOTICE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-common-%{version}/NOTICE -install -D tools/whisker/LICENSE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-common-%{version}/LICENSE -install -D tools/whisker/NOTICE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-agent-%{version}/NOTICE -install -D tools/whisker/LICENSE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-agent-%{version}/LICENSE -install -D tools/whisker/NOTICE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-usage-%{version}/NOTICE -install -D tools/whisker/LICENSE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-usage-%{version}/LICENSE -install -D tools/whisker/NOTICE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-ui-%{version}/NOTICE -install -D tools/whisker/LICENSE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-ui-%{version}/LICENSE -install -D tools/whisker/NOTICE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-cli-%{version}/NOTICE -install -D tools/whisker/LICENSE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-cli-%{version}/LICENSE -install -D tools/whisker/NOTICE 
${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-marvin-%{version}/NOTICE -install -D tools/whisker/LICENSE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-marvin-%{version}/LICENSE -install -D tools/whisker/NOTICE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-integration-tests-%{version}/NOTICE -install -D tools/whisker/LICENSE ${RPM_BUILD_ROOT}%{_defaultdocdir}/%{name}-integration-tests-%{version}/LICENSE - -%clean -[ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT} - -%preun management -/usr/bin/systemctl stop cloudstack-management || true -/usr/bin/systemctl disable cloudstack-management || true - -%pre management -id cloud > /dev/null 2>&1 || /usr/sbin/useradd -M -U -c "CloudStack unprivileged user" \ - -r -s /bin/sh -d %{_localstatedir}/cloudstack/management cloud|| true - -rm -rf %{_localstatedir}/cache/cloudstack - -# in case of upgrade to 4.9+ copy commands.properties if not exists in /etc/cloudstack/management/ -if [ "$1" == "2" ] ; then - if [ -f "%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/commands.properties" ] && [ ! 
-f "%{_sysconfdir}/%{name}/management/commands.properties" ] ; then - cp -p %{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/commands.properties %{_sysconfdir}/%{name}/management/commands.properties - fi -fi - -# Remove old tomcat symlinks and env config file -if [ -L "%{_datadir}/%{name}-management/lib" ] -then - rm -f %{_datadir}/%{name}-management/bin - rm -f %{_datadir}/%{name}-management/lib - rm -f %{_datadir}/%{name}-management/temp - rm -f %{_datadir}/%{name}-management/work - rm -f %{_sysconfdir}/default/%{name}-management -fi - -%post management -# Install mysql-connector-python -pip3 install %{_datadir}/%{name}-management/setup/wheel/six-1.15.0-py2.py3-none-any.whl %{_datadir}/%{name}-management/setup/wheel/setuptools-47.3.1-py3-none-any.whl %{_datadir}/%{name}-management/setup/wheel/protobuf-3.12.2-cp36-cp36m-manylinux1_x86_64.whl %{_datadir}/%{name}-management/setup/wheel/mysql_connector_python-8.0.20-cp36-cp36m-manylinux1_x86_64.whl - -pip3 install urllib3 - -/usr/bin/systemctl enable cloudstack-management > /dev/null 2>&1 || true -/usr/bin/systemctl enable --now rngd > /dev/null 2>&1 || true - -grep -s -q "db.cloud.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.cloud.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" -grep -s -q "db.usage.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.usage.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" -grep -s -q "db.simulator.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.simulator.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" - -# Update DB properties having master and slave(s), with source and replica(s) respectively (for inclusiveness) -grep -s -q "^db.cloud.slaves=" "%{_sysconfdir}/%{name}/management/db.properties" && sed -i "s/^db.cloud.slaves=/db.cloud.replicas=/g" 
"%{_sysconfdir}/%{name}/management/db.properties" -grep -s -q "^db.cloud.secondsBeforeRetryMaster=" "%{_sysconfdir}/%{name}/management/db.properties" && sed -i "s/^db.cloud.secondsBeforeRetryMaster=/db.cloud.secondsBeforeRetrySource=/g" "%{_sysconfdir}/%{name}/management/db.properties" -grep -s -q "^db.cloud.queriesBeforeRetryMaster=" "%{_sysconfdir}/%{name}/management/db.properties" && sed -i "s/^db.cloud.queriesBeforeRetryMaster=/db.cloud.queriesBeforeRetrySource=/g" "%{_sysconfdir}/%{name}/management/db.properties" -grep -s -q "^db.usage.slaves=" "%{_sysconfdir}/%{name}/management/db.properties" && sed -i "s/^db.usage.slaves=/db.usage.replicas=/g" "%{_sysconfdir}/%{name}/management/db.properties" -grep -s -q "^db.usage.secondsBeforeRetryMaster=" "%{_sysconfdir}/%{name}/management/db.properties" && sed -i "s/^db.usage.secondsBeforeRetryMaster=/db.usage.secondsBeforeRetrySource=/g" "%{_sysconfdir}/%{name}/management/db.properties" -grep -s -q "^db.usage.queriesBeforeRetryMaster=" "%{_sysconfdir}/%{name}/management/db.properties" && sed -i "s/^db.usage.queriesBeforeRetryMaster=/db.usage.queriesBeforeRetrySource=/g" "%{_sysconfdir}/%{name}/management/db.properties" - -if [ ! 
-f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then - echo Please download vhd-util from http://download.cloudstack.org/tools/vhd-util and put it in - echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/ -fi - -if [ -f %{_sysconfdir}/sysconfig/%{name}-management ] ; then - rm -f %{_sysconfdir}/sysconfig/%{name}-management -fi - -chown -R cloud:cloud /var/log/cloudstack/management - -systemctl daemon-reload - -%posttrans management -# Print help message -if [ -f "/usr/share/cloudstack-common/scripts/installer/cloudstack-help-text" ];then - sed -i "s,^ACS_VERSION=.*,ACS_VERSION=%{_maventag},g" /usr/share/cloudstack-common/scripts/installer/cloudstack-help-text - /usr/share/cloudstack-common/scripts/installer/cloudstack-help-text management -fi - -%preun agent -/sbin/service cloudstack-agent stop || true -if [ "$1" == "0" ] ; then - /sbin/chkconfig --del cloudstack-agent > /dev/null 2>&1 || true -fi - -%pre agent - -# save old configs if they exist (for upgrade). Otherwise we may lose them -# when the old packages are erased. There are a lot of properties files here. -if [ -d "%{_sysconfdir}/cloud" ] ; then - mv %{_sysconfdir}/cloud %{_sysconfdir}/cloud.rpmsave -fi - -%post agent -if [ "$1" == "2" ] ; then - echo "Running %{_bindir}/%{name}-agent-upgrade to update bridge name for upgrade from CloudStack 4.0.x (and before) to CloudStack 4.1 (and later)" - %{_bindir}/%{name}-agent-upgrade -fi -if [ ! 
-d %{_sysconfdir}/libvirt/hooks ] ; then - mkdir %{_sysconfdir}/libvirt/hooks -fi -cp -a ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib/libvirtqemuhook %{_sysconfdir}/libvirt/hooks/qemu -mkdir -m 0755 -p /usr/share/cloudstack-agent/tmp -/usr/bin/systemctl restart libvirtd -/usr/bin/systemctl enable cloudstack-agent > /dev/null 2>&1 || true -/usr/bin/systemctl enable cloudstack-rolling-maintenance@p > /dev/null 2>&1 || true -/usr/bin/systemctl enable --now rngd > /dev/null 2>&1 || true - -# if saved configs from upgrade exist, copy them over -if [ -f "%{_sysconfdir}/cloud.rpmsave/agent/agent.properties" ]; then - mv %{_sysconfdir}/%{name}/agent/agent.properties %{_sysconfdir}/%{name}/agent/agent.properties.rpmnew - cp -p %{_sysconfdir}/cloud.rpmsave/agent/agent.properties %{_sysconfdir}/%{name}/agent - # make sure we only do this on the first install of this RPM, don't want to overwrite on a reinstall - mv %{_sysconfdir}/cloud.rpmsave/agent/agent.properties %{_sysconfdir}/cloud.rpmsave/agent/agent.properties.rpmsave -fi - -systemctl daemon-reload - -%posttrans agent -# Print help message -if [ -f "/usr/share/cloudstack-common/scripts/installer/cloudstack-help-text" ];then - sed -i "s,^ACS_VERSION=.*,ACS_VERSION=%{_maventag},g" /usr/share/cloudstack-common/scripts/installer/cloudstack-help-text - /usr/share/cloudstack-common/scripts/installer/cloudstack-help-text agent -fi - -%pre usage -id cloud > /dev/null 2>&1 || /usr/sbin/useradd -M -U -c "CloudStack unprivileged user" \ - -r -s /bin/sh -d %{_localstatedir}/cloudstack/management cloud|| true - -%preun usage -/sbin/service cloudstack-usage stop || true -if [ "$1" == "0" ] ; then - /sbin/chkconfig --del cloudstack-usage > /dev/null 2>&1 || true -fi - -%post usage -if [ -f "%{_sysconfdir}/%{name}/management/db.properties" ]; then - echo "Replacing usage server's db.properties with a link to the management server's db.properties" - rm -f %{_sysconfdir}/%{name}/usage/db.properties - ln -s 
%{_sysconfdir}/%{name}/management/db.properties %{_sysconfdir}/%{name}/usage/db.properties - /usr/bin/systemctl enable cloudstack-usage > /dev/null 2>&1 || true -fi - -if [ -f "%{_sysconfdir}/%{name}/management/key" ]; then - echo "Replacing usage server's key with a link to the management server's key" - rm -f %{_sysconfdir}/%{name}/usage/key - ln -s %{_sysconfdir}/%{name}/management/key %{_sysconfdir}/%{name}/usage/key -fi - -if [ ! -f "%{_sysconfdir}/%{name}/usage/key" ]; then - ln -s %{_sysconfdir}/%{name}/management/key %{_sysconfdir}/%{name}/usage/key -fi - -mkdir -p /usr/local/libexec -if [ ! -f "/usr/local/libexec/sanity-check-last-id" ]; then - echo 1 > /usr/local/libexec/sanity-check-last-id -fi -chown cloud:cloud /usr/local/libexec/sanity-check-last-id - -%posttrans usage -# Print help message -if [ -f "/usr/share/cloudstack-common/scripts/installer/cloudstack-help-text" ];then - sed -i "s,^ACS_VERSION=.*,ACS_VERSION=%{_maventag},g" /usr/share/cloudstack-common/scripts/installer/cloudstack-help-text - /usr/share/cloudstack-common/scripts/installer/cloudstack-help-text usage -fi - -%post marvin -pip install --upgrade https://files.pythonhosted.org/packages/ca/ea/1e2553b088bad2f9fa8120c2624f797b2d7450d3b61bb492d29c72e3d3c2/mysql_connector_python-8.0.20-cp27-cp27mu-manylinux1_x86_64.whl -pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz -pip3 install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz -pip3 install --upgrade nose -pip3 install --upgrade urllib3 - -#No default permission as the permission setup is complex -%files management -%defattr(-,root,root,-) -%dir %{_datadir}/%{name}-management -%dir %attr(0770,root,cloud) %{_localstatedir}/%{name}/mnt -%dir %attr(0770,cloud,cloud) %{_localstatedir}/%{name}/management -%dir %attr(0770,root,cloud) %{_localstatedir}/cache/%{name}/management -%dir %attr(0770,root,cloud) %{_localstatedir}/log/%{name}/management -%config(noreplace) %{_sysconfdir}/default/%{name}-management 
-%config(noreplace) %{_sysconfdir}/sudoers.d/%{name}-management -%config(noreplace) %{_sysconfdir}/security/limits.d/cloud -%config(noreplace) %attr(0640,root,cloud) %{_sysconfdir}/%{name}/management/db.properties -%config(noreplace) %attr(0640,root,cloud) %{_sysconfdir}/%{name}/management/server.properties -%config(noreplace) %attr(0640,root,cloud) %{_sysconfdir}/%{name}/management/config.json -%config(noreplace) %{_sysconfdir}/%{name}/management/log4j-cloud.xml -%config(noreplace) %{_sysconfdir}/%{name}/management/log4j2.xml -%config(noreplace) %{_sysconfdir}/%{name}/management/environment.properties -%config(noreplace) %{_sysconfdir}/%{name}/management/java.security.ciphers -%attr(0644,root,root) %{_unitdir}/%{name}-management.service -%attr(0755,cloud,cloud) %{_localstatedir}/run/%{name}-management.pid -%attr(0755,root,root) %{_bindir}/%{name}-setup-management -%attr(0755,root,root) %{_bindir}/%{name}-update-xenserver-licenses -%{_datadir}/%{name}-management/conf -%{_datadir}/%{name}-management/lib/*.jar -%{_datadir}/%{name}-management/logs -%{_datadir}/%{name}-management/templates -%attr(0755,root,root) %{_bindir}/%{name}-setup-databases -%attr(0755,root,root) %{_bindir}/%{name}-migrate-databases -%attr(0755,root,root) %{_bindir}/%{name}-set-guest-password -%attr(0755,root,root) %{_bindir}/%{name}-set-guest-sshkey -%attr(0755,root,root) %{_bindir}/%{name}-sysvmadm -%attr(0755,root,root) %{_bindir}/%{name}-setup-encryption -%attr(0755,root,root) %{_bindir}/cmk -%{_datadir}/%{name}-management/setup/*.sql -%{_datadir}/%{name}-management/setup/*.sh -%{_datadir}/%{name}-management/setup/server-setup.xml -%{_datadir}/%{name}-management/webapp/* -%attr(0755,root,root) %{_bindir}/%{name}-external-ipallocator.py -%attr(0755,root,root) %{_initrddir}/%{name}-ipallocator -%dir %attr(0770,root,root) %{_localstatedir}/log/%{name}/ipallocator -%{_defaultdocdir}/%{name}-management-%{version}/LICENSE -%{_defaultdocdir}/%{name}-management-%{version}/NOTICE 
-#%attr(0644,root,root) %{_sysconfdir}/logrotate.d/%{name}-catalina -%{_datadir}/%{name}-management/setup/wheel/*.whl - -%files agent -%attr(0755,root,root) %{_bindir}/%{name}-setup-agent -%attr(0755,root,root) %{_bindir}/%{name}-agent-upgrade -%attr(0755,root,root) %{_bindir}/%{name}-guest-tool -%attr(0755,root,root) %{_bindir}/%{name}-ssh -%attr(0644,root,root) %{_unitdir}/%{name}-agent.service -%attr(0644,root,root) %{_unitdir}/%{name}-rolling-maintenance@.service -%config(noreplace) %{_sysconfdir}/default/%{name}-agent -%attr(0644,root,root) %{_sysconfdir}/profile.d/%{name}-agent-profile.sh -%config(noreplace) %attr(0644,root,root) %{_sysconfdir}/logrotate.d/%{name}-agent -%attr(0755,root,root) %{_datadir}/%{name}-common/scripts/network/cisco -%config(noreplace) %{_sysconfdir}/%{name}/agent -%dir %{_localstatedir}/log/%{name}/agent -%attr(0644,root,root) %{_datadir}/%{name}-agent/lib/*.jar -%attr(0755,root,root) %{_datadir}/%{name}-agent/lib/libvirtqemuhook -%attr(0755,root,root) %{_datadir}/%{name}-agent/lib/rolling-maintenance -%dir %{_datadir}/%{name}-agent/plugins -%{_defaultdocdir}/%{name}-agent-%{version}/LICENSE -%{_defaultdocdir}/%{name}-agent-%{version}/NOTICE - -%files common -%dir %attr(0755,root,root) %{python_sitearch}/cloudutils -%dir %attr(0755,root,root) %{_datadir}/%{name}-common/vms -%attr(0755,root,root) %{_datadir}/%{name}-common/scripts -%attr(0755,root,root) /usr/bin/cloudstack-sccs -%attr(0644, root, root) %{_datadir}/%{name}-common/vms/agent.zip -%attr(0644, root, root) %{_datadir}/%{name}-common/vms/cloud-scripts.tgz -%attr(0644, root, root) %{_datadir}/%{name}-common/vms/patch-sysvms.sh -%attr(0644,root,root) %{python_sitearch}/cloud_utils.py -%attr(0644,root,root) %{python_sitearch}/__pycache__/* -%attr(0644,root,root) %{python_sitearch}/cloudutils/* -%attr(0644, root, root) %{_datadir}/%{name}-common/lib/jasypt-1.9.3.jar -%attr(0644, root, root) %{_datadir}/%{name}-common/lib/%{name}-utils.jar 
-%{_defaultdocdir}/%{name}-common-%{version}/LICENSE -%{_defaultdocdir}/%{name}-common-%{version}/NOTICE - -%files ui -%config(noreplace) %attr(0640,root,cloud) %{_sysconfdir}/%{name}/ui/config.json -%{_datadir}/%{name}-ui/* -%{_defaultdocdir}/%{name}-ui-%{version}/LICENSE -%{_defaultdocdir}/%{name}-ui-%{version}/NOTICE - -%files usage -%attr(0644,root,root) %{_unitdir}/%{name}-usage.service -%config(noreplace) %{_sysconfdir}/default/%{name}-usage -%attr(0644,root,root) %{_datadir}/%{name}-usage/*.jar -%attr(0644,root,root) %{_datadir}/%{name}-usage/lib/*.jar -%dir %attr(0770,root,cloud) %{_localstatedir}/log/%{name}/usage -%attr(0644,root,root) %{_sysconfdir}/%{name}/usage/db.properties -%attr(0644,root,root) %{_sysconfdir}/%{name}/usage/log4j-cloud.xml -%{_defaultdocdir}/%{name}-usage-%{version}/LICENSE -%{_defaultdocdir}/%{name}-usage-%{version}/NOTICE - -%files cli -%attr(0644,root,root) %{python_sitearch}/cloudapis.py -%attr(0644,root,root) %{python_sitearch}/cloudtool/__init__.py -%attr(0644,root,root) %{python_sitearch}/cloudtool/utils.py -%{_defaultdocdir}/%{name}-cli-%{version}/LICENSE -%{_defaultdocdir}/%{name}-cli-%{version}/NOTICE - -%files marvin -%attr(0644,root,root) %{_datadir}/%{name}-marvin/Marvin*.tar.gz -%{_defaultdocdir}/%{name}-marvin-%{version}/LICENSE -%{_defaultdocdir}/%{name}-marvin-%{version}/NOTICE - -%files integration-tests -%attr(0755,root,root) %{_datadir}/%{name}-integration-tests/* -%{_defaultdocdir}/%{name}-integration-tests-%{version}/LICENSE -%{_defaultdocdir}/%{name}-integration-tests-%{version}/NOTICE - -%if "%{_ossnoss}" == "noredist" -%files mysql-ha -%defattr(0644,cloud,cloud,0755) -%attr(0644,root,root) %{_datadir}/%{name}-mysql-ha/lib/* -%endif - -%files baremetal-agent -%attr(0755,root,root) %{_bindir}/cloudstack-setup-baremetal - -%changelog -* Fri Oct 14 2022 Daan Hoogland 4.18.0 -- initialising sanity check pointer file - -* Thu Apr 30 2015 Rohit Yadav 4.6.0 -- Remove awsapi package - -* Wed Nov 19 2014 Hugo Trippaers 
4.6.0 -- Create a specific spec for CentOS 7 - -* Fri Jul 4 2014 Hugo Trippaers 4.5.0 -- Add a package for the mysql ha module - -* Fri Oct 5 2012 Hugo Trippaers 4.1.0 -- new style spec file diff --git a/packaging/centos7/replace.properties b/packaging/centos7/replace.properties deleted file mode 100644 index 8c3560dd462..00000000000 --- a/packaging/centos7/replace.properties +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -DBUSER=cloud -DBPW=cloud -DBROOTPW= -MSLOG=vmops.log -APISERVERLOG=api.log -DBHOST=localhost -DBDRIVER=jdbc:mysql -COMPONENTS-SPEC=components-premium.xml -REMOTEHOST=localhost -AGENTCLASSPATH= -AGENTLOG=/var/log/cloudstack/agent/agent.log -AGENTLOGDIR=/var/log/cloudstack/agent/ -AGENTSYSCONFDIR=/etc/cloudstack/agent -APISERVERLOG=/var/log/cloudstack/management/apilog.log -BINDIR=/usr/bin -COMMONLIBDIR=/usr/share/cloudstack-common -CONFIGUREVARS= -DEPSCLASSPATH= -DOCDIR= -IPALOCATORLOG=/var/log/cloudstack/management/ipallocator.log -JAVADIR=/usr/share/java -LIBEXECDIR=/usr/libexec -LOCKDIR=/var/lock -MSCLASSPATH= -MSCONF=/etc/cloudstack/management -MSENVIRON=/usr/share/cloudstack-management -MSLOG=/var/log/cloudstack/management/management-server.log -MSLOGDIR=/var/log/cloudstack/management/ -MSMNTDIR=/var/cloudstack/mnt -MSUSER=cloud -PIDDIR=/var/run -PLUGINJAVADIR=/usr/share/cloudstack-management/plugin -PREMIUMJAVADIR=/usr/share/cloudstack-management/premium -PYTHONDIR=/usr/lib/python3.6/site-packages/ -SERVERSYSCONFDIR=/etc/sysconfig -SETUPDATADIR=/usr/share/cloudstack-management/setup -SYSCONFDIR=/etc/sysconfig -SYSTEMCLASSPATH= -SYSTEMJARS= -USAGECLASSPATH= -USAGELOG=/var/log/cloudstack/usage/usage.log -USAGESYSCONFDIR=/etc/sysconfig diff --git a/packaging/centos8 b/packaging/centos8 new file mode 120000 index 00000000000..4dad90d45e0 --- /dev/null +++ b/packaging/centos8 @@ -0,0 +1 @@ +el8 \ No newline at end of file diff --git a/packaging/centos8/cloud-ipallocator.rc b/packaging/centos8/cloud-ipallocator.rc deleted file mode 100755 index 255725b94d0..00000000000 --- a/packaging/centos8/cloud-ipallocator.rc +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# chkconfig: 35 99 10 -# description: Cloud Agent - -# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well - -. /etc/rc.d/init.d/functions - -# set environment variables - -SHORTNAME="$(basename $(readlink -f $0))" -PIDFILE=/var/run/"$SHORTNAME".pid -LOCKFILE=/var/lock/subsys/"$SHORTNAME" -LOGFILE=/var/log/cloudstack/ipallocator/ipallocator.log -PROGNAME="External IPAllocator" - -unset OPTIONS -[ -r /etc/sysconfig/"$SHORTNAME" ] && source /etc/sysconfig/"$SHORTNAME" -DAEMONIZE=/usr/bin/cloud-daemonize -PROG=/usr/bin/cloud-external-ipallocator.py -OPTIONS=8083 - -start() { - echo -n $"Starting $PROGNAME: " - if hostname --fqdn >/dev/null 2>&1 ; then - daemon --check=$SHORTNAME --pidfile=${PIDFILE} "$DAEMONIZE" \ - -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" "$PROG" $OPTIONS - RETVAL=$? - echo - else - failure - echo - echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr - RETVAL=9 - fi - [ $RETVAL = 0 ] && touch ${LOCKFILE} - return $RETVAL -} - -stop() { - echo -n $"Stopping $PROGNAME: " - killproc -p ${PIDFILE} $SHORTNAME # -d 10 $SHORTNAME - RETVAL=$? - echo - [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE} -} - - -# See how we were called. -case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p ${PIDFILE} $SHORTNAME - RETVAL=$? 
- ;; - restart) - stop - sleep 3 - start - ;; - condrestart) - if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then - stop - sleep 3 - start - fi - ;; - *) - echo $"Usage: $SHORTNAME {start|stop|restart|condrestart|status|help}" - RETVAL=3 -esac - -exit $RETVAL diff --git a/packaging/debian/replace.properties b/packaging/debian/replace.properties index 5007360a2b7..db88310d81c 100644 --- a/packaging/debian/replace.properties +++ b/packaging/debian/replace.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/packaging/centos7/cloud-ipallocator.rc b/packaging/el8/cloud-ipallocator.rc similarity index 100% rename from packaging/centos7/cloud-ipallocator.rc rename to packaging/el8/cloud-ipallocator.rc diff --git a/packaging/centos7/cloud.limits b/packaging/el8/cloud.limits similarity index 100% rename from packaging/centos7/cloud.limits rename to packaging/el8/cloud.limits diff --git a/packaging/centos8/cloud.spec b/packaging/el8/cloud.spec similarity index 97% rename from packaging/centos8/cloud.spec rename to packaging/el8/cloud.spec index 37fe007e3fb..a88d4b1cbbf 100644 --- a/packaging/centos8/cloud.spec +++ b/packaging/el8/cloud.spec @@ -36,7 +36,7 @@ Group: System Environment/Libraries Source0: %{name}-%{_maventag}.tgz BuildRoot: %{_tmppath}/%{name}-%{_maventag}-%{release}-build -BuildRequires: java-11-openjdk-devel +BuildRequires: (java-11-openjdk-devel or java-17-openjdk-devel) #BuildRequires: ws-commons-util BuildRequires: jpackage-utils BuildRequires: gcc @@ -109,6 +109,7 @@ Requires: (net-tools or net-tools-deprecated) Requires: iproute Requires: ipset Requires: perl 
+Requires: rsync Requires: (python3-libvirt or python3-libvirt-python) Requires: (qemu-img or qemu-tools) Requires: qemu-kvm @@ -181,7 +182,7 @@ echo Doing CloudStack build %build -cp packaging/centos8/replace.properties build/replace.properties +cp packaging/el8/replace.properties build/replace.properties echo VERSION=%{_maventag} >> build/replace.properties echo PACKAGE=%{name} >> build/replace.properties touch build/gitrev.txt @@ -232,7 +233,7 @@ cp -r python/lib/cloudutils ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/python-s python3 -m py_compile ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/python-site/cloud_utils.py python3 -m compileall ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/python-site/cloudutils cp build/gitrev.txt ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts -cp packaging/centos8/cloudstack-sccs ${RPM_BUILD_ROOT}/usr/bin +cp packaging/el8/cloudstack-sccs ${RPM_BUILD_ROOT}/usr/bin mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts/network/cisco cp -r plugins/network-elements/cisco-vnmc/src/main/scripts/network/cisco/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts/network/cisco @@ -260,7 +261,8 @@ install -D client/target/utilities/bin/cloud-setup-baremetal ${RPM_BUILD_ROOT}%{ install -D client/target/utilities/bin/cloud-sysvmadm ${RPM_BUILD_ROOT}%{_bindir}/%{name}-sysvmadm install -D client/target/utilities/bin/cloud-update-xenserver-licenses ${RPM_BUILD_ROOT}%{_bindir}/%{name}-update-xenserver-licenses # Bundle cmk in cloudstack-management -wget https://github.com/apache/cloudstack-cloudmonkey/releases/download/6.3.0/cmk.linux.x86-64 -O ${RPM_BUILD_ROOT}%{_bindir}/cmk +CMK_REL=$(wget -O - "https://api.github.com/repos/apache/cloudstack-cloudmonkey/releases" 2>/dev/null | jq -r '.[0].tag_name') +wget https://github.com/apache/cloudstack-cloudmonkey/releases/download/$CMK_REL/cmk.linux.x86-64 -O ${RPM_BUILD_ROOT}%{_bindir}/cmk chmod +x ${RPM_BUILD_ROOT}%{_bindir}/cmk cp -r client/target/utilities/scripts/db/* 
${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup @@ -289,13 +291,14 @@ install python/bindir/cloud-external-ipallocator.py ${RPM_BUILD_ROOT}%{_bindir}/ install -D client/target/pythonlibs/jasypt-1.9.3.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/lib/jasypt-1.9.3.jar install -D utils/target/cloud-utils-%{_maventag}-bundled.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/lib/%{name}-utils.jar -install -D packaging/centos8/cloud-ipallocator.rc ${RPM_BUILD_ROOT}%{_initrddir}/%{name}-ipallocator -install -D packaging/centos8/cloud.limits ${RPM_BUILD_ROOT}%{_sysconfdir}/security/limits.d/cloud +install -D packaging/el8/cloud-ipallocator.rc ${RPM_BUILD_ROOT}%{_initrddir}/%{name}-ipallocator +install -D packaging/el8/cloud.limits ${RPM_BUILD_ROOT}%{_sysconfdir}/security/limits.d/cloud install -D packaging/systemd/cloudstack-management.service ${RPM_BUILD_ROOT}%{_unitdir}/%{name}-management.service install -D packaging/systemd/cloudstack-management.default ${RPM_BUILD_ROOT}%{_sysconfdir}/default/%{name}-management install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir}/sudoers.d/%{name}-management touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina +install -D server/target/conf/cloudstack-management.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-management # SystemVM template mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm @@ -360,6 +363,7 @@ cp client/target/lib/mysql*jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-usage/lib/ install -D packaging/systemd/cloudstack-usage.service ${RPM_BUILD_ROOT}%{_unitdir}/%{name}-usage.service install -D packaging/systemd/cloudstack-usage.default ${RPM_BUILD_ROOT}%{_sysconfdir}/default/%{name}-usage mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/usage/ +install -D usage/target/transformed/cloudstack-usage.logrotate 
${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-usage # Marvin mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-marvin @@ -577,6 +581,7 @@ pip3 install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz %config(noreplace) %{_sysconfdir}/%{name}/management/log4j2.xml %config(noreplace) %{_sysconfdir}/%{name}/management/environment.properties %config(noreplace) %{_sysconfdir}/%{name}/management/java.security.ciphers +%config(noreplace) %attr(0644,root,root) %{_sysconfdir}/logrotate.d/%{name}-management %attr(0644,root,root) %{_unitdir}/%{name}-management.service %attr(0755,cloud,cloud) %{_localstatedir}/run/%{name}-management.pid %attr(0755,root,root) %{_bindir}/%{name}-setup-management @@ -648,6 +653,7 @@ pip3 install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz %files usage %attr(0644,root,root) %{_unitdir}/%{name}-usage.service %config(noreplace) %{_sysconfdir}/default/%{name}-usage +%config(noreplace) %attr(0644,root,root) %{_sysconfdir}/logrotate.d/%{name}-usage %attr(0644,root,root) %{_datadir}/%{name}-usage/*.jar %attr(0644,root,root) %{_datadir}/%{name}-usage/lib/*.jar %dir %attr(0770,root,cloud) %{_localstatedir}/log/%{name}/usage diff --git a/packaging/centos7/cloudstack-agent.te b/packaging/el8/cloudstack-agent.te similarity index 100% rename from packaging/centos7/cloudstack-agent.te rename to packaging/el8/cloudstack-agent.te diff --git a/packaging/centos7/cloudstack-sccs b/packaging/el8/cloudstack-sccs similarity index 100% rename from packaging/centos7/cloudstack-sccs rename to packaging/el8/cloudstack-sccs diff --git a/packaging/centos8/replace.properties b/packaging/el8/replace.properties similarity index 99% rename from packaging/centos8/replace.properties rename to packaging/el8/replace.properties index 8f1fb11f999..efeab01166e 100644 --- a/packaging/centos8/replace.properties +++ b/packaging/el8/replace.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in 
compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/packaging/el9 b/packaging/el9 index 575742e7ff2..4dad90d45e0 120000 --- a/packaging/el9 +++ b/packaging/el9 @@ -1 +1 @@ -centos8 \ No newline at end of file +el8 \ No newline at end of file diff --git a/packaging/package.sh b/packaging/package.sh index bf95f84a11a..ecffaace48b 100755 --- a/packaging/package.sh +++ b/packaging/package.sh @@ -22,11 +22,11 @@ Usage: ./package.sh -d DISTRO [OPTIONS]... Package CloudStack for specific distribution and provided options. If there's a "branding" string in the POM version (e.g. x.y.z.a-NAME[-SNAPSHOT]), the branding name will -be used in the final generated package like: cloudstack-management-x.y.z.a-NAME.NUMBER.el7.centos.x86_64 +be used in the final generated package like: cloudstack-management-x.y.z.a-NAME.NUMBER.el.x86_64 note that you can override/provide "branding" string with "-b, --brand" flag as well. 
Mandatory arguments: - -d, --distribution string Build package for specified distribution ("centos7") + -d, --distribution string Build package for specified distribution ("el8") Optional arguments: -p, --pack string Define which type of libraries to package ("oss"|"OSS"|"noredist"|"NOREDIST") (default "oss") @@ -42,12 +42,12 @@ Other arguments: -h, --help Display this help message and exit Examples: - package.sh --distribution centos7 - package.sh --distribution centos7 --pack oss - package.sh --distribution centos7 --pack noredist - package.sh --distribution centos7 --pack noredist -t "kvm,xen" - package.sh --distribution centos7 --release 42 - package.sh --distribution centos7 --pack noredist --release 42 + package.sh --distribution el8 + package.sh --distribution el8 --pack oss + package.sh --distribution el8 --pack noredist + package.sh --distribution el8 --pack noredist -t "kvm,xen" + package.sh --distribution el8 --release 42 + package.sh --distribution el8 --pack noredist --release 42 USAGE exit 0 diff --git a/packaging/suse15 b/packaging/suse15 index 575742e7ff2..4dad90d45e0 120000 --- a/packaging/suse15 +++ b/packaging/suse15 @@ -1 +1 @@ -centos8 \ No newline at end of file +el8 \ No newline at end of file diff --git a/packaging/systemd/cloudstack-agent.service b/packaging/systemd/cloudstack-agent.service index e04d672beaa..5e2e5db0b21 100644 --- a/packaging/systemd/cloudstack-agent.service +++ b/packaging/systemd/cloudstack-agent.service @@ -31,6 +31,8 @@ EnvironmentFile=/etc/default/cloudstack-agent ExecStart=/usr/bin/java $JAVA_OPTS $JAVA_DEBUG -cp $CLASSPATH $JAVA_CLASS Restart=always RestartSec=10s +StandardOutput=append:/var/log/cloudstack/agent/agent.out +StandardError=append:/var/log/cloudstack/agent/agent.err [Install] WantedBy=multi-user.target diff --git a/packaging/systemd/cloudstack-management.default b/packaging/systemd/cloudstack-management.default index ca8ff628fc1..994a1ee8699 100644 --- a/packaging/systemd/cloudstack-management.default 
+++ b/packaging/systemd/cloudstack-management.default @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. -JAVA_OPTS="-Djava.security.properties=/etc/cloudstack/management/java.security.ciphers -Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Xmx2G -XX:+UseParallelGC -XX:MaxGCPauseMillis=500 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/cloudstack/management/ -XX:ErrorFile=/var/log/cloudstack/management/cloudstack-management.err --add-opens=java.base/java.lang=ALL-UNNAMED --add-exports=java.base/sun.security.x509=ALL-UNNAMED" +JAVA_OPTS="-Djava.security.properties=/etc/cloudstack/management/java.security.ciphers -Djava.awt.headless=true -Xmx2G -XX:+UseParallelGC -XX:MaxGCPauseMillis=500 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/cloudstack/management/ -XX:ErrorFile=/var/log/cloudstack/management/cloudstack-management.err --add-opens=java.base/java.lang=ALL-UNNAMED --add-exports=java.base/sun.security.x509=ALL-UNNAMED" CLASSPATH="/usr/share/cloudstack-management/lib/*:/etc/cloudstack/management:/usr/share/cloudstack-common:/usr/share/cloudstack-management/setup:/usr/share/cloudstack-management:/usr/share/java/mysql-connector-java.jar:/usr/share/cloudstack-mysql-ha/lib/*" diff --git a/packaging/systemd/cloudstack-management.service b/packaging/systemd/cloudstack-management.service index b979f7f375a..55780af7a5c 100644 --- a/packaging/systemd/cloudstack-management.service +++ b/packaging/systemd/cloudstack-management.service @@ -35,6 +35,8 @@ EnvironmentFile=/etc/default/cloudstack-management WorkingDirectory=/var/log/cloudstack/management PIDFile=/var/run/cloudstack-management.pid ExecStart=/usr/bin/java $JAVA_DEBUG $JAVA_OPTS -cp $CLASSPATH $BOOTSTRAP_CLASS +StandardOutput=append:/var/log/cloudstack/management/management-server.out +StandardError=append:/var/log/cloudstack/management/management-server.err [Install] WantedBy=multi-user.target diff --git 
a/packaging/systemd/cloudstack-usage.service b/packaging/systemd/cloudstack-usage.service index c23814eaac6..bf5bd2a189b 100644 --- a/packaging/systemd/cloudstack-usage.service +++ b/packaging/systemd/cloudstack-usage.service @@ -35,6 +35,8 @@ Environment=JAVA_PID=$$ ExecStart=/bin/sh -ec '/usr/bin/java -Dpid=${JAVA_PID} $JAVA_OPTS $JAVA_DEBUG -cp $CLASSPATH $JAVA_CLASS' Restart=always RestartSec=10s +StandardOutput=append:/var/log/cloudstack/usage/usage.out +StandardError=append:/var/log/cloudstack/usage/usage.err [Install] WantedBy=multi-user.target diff --git a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index 94b763d013f..db40b6e68dd 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -120,7 +120,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API } if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { - logger.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); + } return true; } diff --git a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 1e766468ba8..2e7ae23d6f1 100644 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ 
b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@ -72,7 +72,9 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP Project project = CallContext.current().getProject(); if (project == null) { - logger.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); + } return apiNames; } @@ -110,8 +112,10 @@ public class ProjectRoleBasedApiAccessChecker extends AdapterBase implements AP Project project = CallContext.current().getProject(); if (project == null) { - logger.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, + if (logger.isTraceEnabled()) { + logger.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, user)); + } return true; } diff --git a/plugins/acl/static-role-based/src/main/resources/META-INF/cloudstack/acl-static-role-based/spring-acl-static-role-based-context.xml b/plugins/acl/static-role-based/src/main/resources/META-INF/cloudstack/acl-static-role-based/spring-acl-static-role-based-context.xml index 633602e76c1..4c690660502 100644 --- a/plugins/acl/static-role-based/src/main/resources/META-INF/cloudstack/acl-static-role-based/spring-acl-static-role-based-context.xml +++ b/plugins/acl/static-role-based/src/main/resources/META-INF/cloudstack/acl-static-role-based/spring-acl-static-role-based-context.xml @@ -25,7 +25,7 @@ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd 
http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd" - > + > diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/resources/META-INF/cloudstack/explicit-dedication/spring-explicit-dedication-context.xml b/plugins/affinity-group-processors/explicit-dedication/src/main/resources/META-INF/cloudstack/explicit-dedication/spring-explicit-dedication-context.xml index 7e98c388026..5f049fa3d95 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/resources/META-INF/cloudstack/explicit-dedication/spring-explicit-dedication-context.xml +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/resources/META-INF/cloudstack/explicit-dedication/spring-explicit-dedication-context.xml @@ -32,5 +32,5 @@ - + diff --git a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java index 7f316fe7a91..b94cf49e4d9 100644 --- a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java @@ -23,9 +23,14 @@ import java.util.Map; import java.util.Set; import java.util.HashSet; import java.util.ArrayList; +import java.util.stream.Collectors; import javax.inject.Inject; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import org.apache.commons.collections.CollectionUtils; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; @@ -54,9 +59,18 @@ public class HostAffinityProcessor extends AffinityProcessorBase implements Affi VirtualMachine vm = vmProfile.getVirtualMachine(); 
List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); if (CollectionUtils.isNotEmpty(vmGroupMappings)) { - for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { - processAffinityGroup(vmGroupMapping, plan, vm, vmList); - } + List affinityGroupIdList = vmGroupMappings.stream().map(AffinityGroupVMMapVO::getAffinityGroupId).collect(Collectors.toList()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + if (!affinityGroupIdList.isEmpty()) { + _affinityGroupDao.listByIds(affinityGroupIdList, true); + } + for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { + processAffinityGroup(vmGroupMapping, plan, vm, vmList); + } + } + }); } } @@ -130,16 +144,25 @@ public class HostAffinityProcessor extends AffinityProcessorBase implements Affi long plannedHostId = plannedDestination.getHost().getId(); VirtualMachine vm = vmProfile.getVirtualMachine(); List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); - - if (CollectionUtils.isNotEmpty(vmGroupMappings)) { - for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { - if (!checkAffinityGroup(vmGroupMapping, vm, plannedHostId)) { - return false; - } - } + if (CollectionUtils.isEmpty(vmGroupMappings)) { + return true; } + List affinityGroupIds = vmGroupMappings.stream().map(AffinityGroupVMMapVO::getAffinityGroupId).collect(Collectors.toList()); + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + if (!affinityGroupIds.isEmpty()) { + _affinityGroupDao.listByIds(affinityGroupIds, true); + } + for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { + if (!checkAffinityGroup(vmGroupMapping, vm, plannedHostId)) { + return false; + } - return true; + } + return true; + } + }); } /** diff --git 
a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java index 9feeeed2b6d..4681ce4321e 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java @@ -19,10 +19,12 @@ package org.apache.cloudstack.affinity; import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.collections.CollectionUtils; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -37,6 +39,10 @@ import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.exception.AffinityConflictException; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -65,40 +71,56 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements VirtualMachine vm = vmProfile.getVirtualMachine(); List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); - for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { - if (vmGroupMapping != null) { - AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); - - if (logger.isDebugEnabled()) { - logger.debug("Processing affinity group " + 
group.getName() + " for VM Id: " + vm.getId()); + if (CollectionUtils.isEmpty(vmGroupMappings)) { + return; + } + List affinityGroupIds = vmGroupMappings.stream().map(AffinityGroupVMMapVO::getAffinityGroupId).collect(Collectors.toList()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + if (!affinityGroupIds.isEmpty()) { + _affinityGroupDao.listByIds(affinityGroupIds, true); } + for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { + processAffinityGroup(vmGroupMapping, avoid, vm); + } + } + }); - List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); - groupVMIds.remove(vm.getId()); + } - for (Long groupVMId : groupVMIds) { - VMInstanceVO groupVM = _vmInstanceDao.findById(groupVMId); - if (groupVM != null && !groupVM.isRemoved()) { - if (groupVM.getHostId() != null) { - avoid.addHost(groupVM.getHostId()); - if (logger.isDebugEnabled()) { - logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); - } - } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && groupVM.getLastHostId() != null) { - long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; - if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { - avoid.addHost(groupVM.getLastHostId()); - if (logger.isDebugEnabled()) { - logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + - " is present on the host, in Stopped state but has reserved capacity"); - } - } + protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, ExcludeList avoid, VirtualMachine vm) { + if (vmGroupMapping != null) { + AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); + + if (logger.isDebugEnabled()) { + 
logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + } + + List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); + groupVMIds.remove(vm.getId()); + + for (Long groupVMId : groupVMIds) { + VMInstanceVO groupVM = _vmInstanceDao.findById(groupVMId); + if (groupVM != null && !groupVM.isRemoved()) { + if (groupVM.getHostId() != null) { + avoid.addHost(groupVM.getHostId()); + if (logger.isDebugEnabled()) { + logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); + } + } + } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && groupVM.getLastHostId() != null) { + long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; + if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { + avoid.addHost(groupVM.getLastHostId()); + if (logger.isDebugEnabled()) { + logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + + " is present on the host, in Stopped state but has reserved capacity"); } } } } } - } @Override @@ -119,6 +141,9 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements VirtualMachine vm = vmProfile.getVirtualMachine(); List vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType()); + if (CollectionUtils.isEmpty(vmGroupMappings)) { + return true; + } for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { // if more than 1 VM's are present in the group then check for @@ -137,7 +162,34 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements } } } - return true; + + List affinityGroupIds = vmGroupMappings.stream().map(AffinityGroupVMMapVO::getAffinityGroupId).collect(Collectors.toList()); + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean 
doInTransaction(TransactionStatus status) { + if (!affinityGroupIds.isEmpty()) { + _affinityGroupDao.listByIds(affinityGroupIds, true); + } + for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { + // if more than 1 VM's are present in the group then check for + // conflict due to parallel deployment + List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(vmGroupMapping.getAffinityGroupId()); + groupVMIds.remove(vm.getId()); + + for (Long groupVMId : groupVMIds) { + VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId); + if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) { + if (logger.isDebugEnabled()) { + logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + + " reserved on the same host " + plannedHostId); + } + return false; + } + } + } + return true; + } + }); } } diff --git a/plugins/affinity-group-processors/host-anti-affinity/src/main/resources/META-INF/cloudstack/host-anti-affinity/spring-host-anti-affinity-context.xml b/plugins/affinity-group-processors/host-anti-affinity/src/main/resources/META-INF/cloudstack/host-anti-affinity/spring-host-anti-affinity-context.xml index 362bb470d38..0cfe6b38084 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/src/main/resources/META-INF/cloudstack/host-anti-affinity/spring-host-anti-affinity-context.xml +++ b/plugins/affinity-group-processors/host-anti-affinity/src/main/resources/META-INF/cloudstack/host-anti-affinity/spring-host-anti-affinity-context.xml @@ -33,5 +33,5 @@ - + diff --git a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/resources/META-INF/cloudstack/non-strict-host-affinity/spring-non-strict-host-affinity-context.xml b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/resources/META-INF/cloudstack/non-strict-host-affinity/spring-non-strict-host-affinity-context.xml index 
a80ddb1e3bb..3d3959b7e2b 100644 --- a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/resources/META-INF/cloudstack/non-strict-host-affinity/spring-non-strict-host-affinity-context.xml +++ b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/resources/META-INF/cloudstack/non-strict-host-affinity/spring-non-strict-host-affinity-context.xml @@ -33,5 +33,5 @@
    - + diff --git a/plugins/affinity-group-processors/non-strict-host-anti-affinity/src/main/resources/META-INF/cloudstack/non-strict-host-anti-affinity/spring-non-strict-host-anti-affinity-context.xml b/plugins/affinity-group-processors/non-strict-host-anti-affinity/src/main/resources/META-INF/cloudstack/non-strict-host-anti-affinity/spring-non-strict-host-anti-affinity-context.xml index 0f42019b269..8c8a0f4218f 100644 --- a/plugins/affinity-group-processors/non-strict-host-anti-affinity/src/main/resources/META-INF/cloudstack/non-strict-host-anti-affinity/spring-non-strict-host-anti-affinity-context.xml +++ b/plugins/affinity-group-processors/non-strict-host-anti-affinity/src/main/resources/META-INF/cloudstack/non-strict-host-anti-affinity/spring-non-strict-host-anti-affinity-context.xml @@ -33,5 +33,5 @@
    - + diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java index dccf5a68e11..81a9df750cb 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java @@ -16,13 +16,14 @@ // under the License. package org.apache.cloudstack.api.response; -import com.cloud.serializer.Param; -import com.google.gson.annotations.SerializedName; +import java.util.HashSet; +import java.util.Set; + import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; -import java.util.HashSet; -import java.util.Set; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; @SuppressWarnings("unused") public class ApiDiscoveryResponse extends BaseResponse { @@ -64,6 +65,18 @@ public class ApiDiscoveryResponse extends BaseResponse { isAsync = false; } + public ApiDiscoveryResponse(ApiDiscoveryResponse another) { + this.name = another.getName(); + this.description = another.getDescription(); + this.since = another.getSince(); + this.isAsync = another.getAsync(); + this.related = another.getRelated(); + this.params = new HashSet<>(another.getParams()); + this.apiResponse = new HashSet<>(another.getApiResponse()); + this.type = another.getType(); + this.setObjectName(another.getObjectName()); + } + public void setName(String name) { this.name = name; } @@ -123,4 +136,8 @@ public class ApiDiscoveryResponse extends BaseResponse { public Set getApiResponse() { return apiResponse; } + + public String getType() { + return type; + } } diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiParameterResponse.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiParameterResponse.java 
index 7713f6b5d69..75f0aacd504 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiParameterResponse.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiParameterResponse.java @@ -16,12 +16,14 @@ // under the License. package org.apache.cloudstack.api.response; -import com.google.gson.annotations.SerializedName; +import java.util.List; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; public class ApiParameterResponse extends BaseResponse { @SerializedName(ApiConstants.NAME) @@ -52,6 +54,8 @@ public class ApiParameterResponse extends BaseResponse { @Param(description = "comma separated related apis to get the parameter") private String related; + private transient List authorizedRoleTypes = null; + public ApiParameterResponse() { } @@ -87,4 +91,11 @@ public class ApiParameterResponse extends BaseResponse { this.related = related; } + public void setAuthorizedRoleTypes(List authorizedRoleTypes) { + this.authorizedRoleTypes = authorizedRoleTypes; + } + + public List getAuthorizedRoleTypes() { + return authorizedRoleTypes; + } } diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java index 239bc49a65a..452b95cf2c0 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java @@ -18,8 +18,10 @@ package org.apache.cloudstack.discovery; import java.lang.reflect.Field; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedHashSet; 
import java.util.List; import java.util.Map; @@ -28,21 +30,22 @@ import java.util.Set; import javax.inject.Inject; import org.apache.cloudstack.acl.APIChecker; +import org.apache.cloudstack.acl.Role; +import org.apache.cloudstack.acl.RoleService; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.BaseAsyncCreateCmd; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.acl.Role; -import org.apache.cloudstack.acl.RoleService; -import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.command.user.discovery.ListApisCmd; import org.apache.cloudstack.api.response.ApiDiscoveryResponse; import org.apache.cloudstack.api.response.ApiParameterResponse; import org.apache.cloudstack.api.response.ApiResponseResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.reflections.ReflectionUtils; import org.springframework.stereotype.Component; @@ -215,6 +218,9 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A paramResponse.setSince(parameterAnnotation.since()); } paramResponse.setRelated(parameterAnnotation.entityType()[0].getName()); + if (parameterAnnotation.authorized() != null) { + paramResponse.setAuthorizedRoleTypes(Arrays.asList(parameterAnnotation.authorized())); + } response.addParam(paramResponse); } } @@ -247,6 +253,7 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A if (user == null) return null; + Account account = accountService.getAccount(user.getAccountId()); if (name != null) { if (!s_apiNameDiscoveryResponseMap.containsKey(name)) @@ -260,10 
+267,9 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A return null; } } - responseList.add(s_apiNameDiscoveryResponseMap.get(name)); + responseList.add(getApiDiscoveryResponseWithAccessibleParams(name, account)); } else { - Account account = accountService.getAccount(user.getAccountId()); if (account == null) { throw new PermissionDeniedException(String.format("The account with id [%s] for user [%s] is null.", user.getAccountId(), user)); } @@ -284,13 +290,33 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A } for (String apiName: apisAllowed) { - responseList.add(s_apiNameDiscoveryResponseMap.get(apiName)); + responseList.add(getApiDiscoveryResponseWithAccessibleParams(apiName, account)); } } response.setResponses(responseList); return response; } + private static ApiDiscoveryResponse getApiDiscoveryResponseWithAccessibleParams(String name, Account account) { + if (Account.Type.ADMIN.equals(account.getType())) { + return s_apiNameDiscoveryResponseMap.get(name); + } + ApiDiscoveryResponse apiDiscoveryResponse = + new ApiDiscoveryResponse(s_apiNameDiscoveryResponseMap.get(name)); + Iterator iterator = apiDiscoveryResponse.getParams().iterator(); + while (iterator.hasNext()) { + ApiParameterResponse parameterResponse = iterator.next(); + List authorizedRoleTypes = parameterResponse.getAuthorizedRoleTypes(); + RoleType accountRoleType = RoleType.getByAccountType(account.getType()); + if (CollectionUtils.isNotEmpty(parameterResponse.getAuthorizedRoleTypes()) && + accountRoleType != null && + !authorizedRoleTypes.contains(accountRoleType)) { + iterator.remove(); + } + } + return apiDiscoveryResponse; + } + @Override public List> getCommands() { List> cmdList = new ArrayList>(); diff --git a/plugins/api/rate-limit/pom.xml b/plugins/api/rate-limit/pom.xml index 73bdd0697d1..2449a23f2d0 100644 --- a/plugins/api/rate-limit/pom.xml +++ b/plugins/api/rate-limit/pom.xml @@ -34,7 +34,7 @@ 
maven-surefire-plugin always - @{argLine} -Xmx2048m -XX:MaxPermSize=1024m + @{argLine} -Xmx2048m -XX:MaxMetaspaceSize=1024m org/apache/cloudstack/ratelimit/integration/* diff --git a/plugins/api/rate-limit/src/main/resources/META-INF/cloudstack/rate-limit/spring-rate-limit-context.xml b/plugins/api/rate-limit/src/main/resources/META-INF/cloudstack/rate-limit/spring-rate-limit-context.xml index d25c57cb0c5..c8308092288 100644 --- a/plugins/api/rate-limit/src/main/resources/META-INF/cloudstack/rate-limit/spring-rate-limit-context.xml +++ b/plugins/api/rate-limit/src/main/resources/META-INF/cloudstack/rate-limit/spring-rate-limit-context.xml @@ -28,5 +28,5 @@ > - + diff --git a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java index fa376f992ed..f162c51a703 100644 --- a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java +++ b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java @@ -24,6 +24,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.storage.dao.VolumeDao; import org.apache.cloudstack.backup.dao.BackupDao; import com.cloud.utils.Pair; @@ -37,6 +38,8 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { @Inject private BackupDao backupDao; + @Inject + private VolumeDao volumeDao; @Override public String getName() { @@ -76,7 +79,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { } @Override - public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid) { + public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { logger.debug("Restoring volume " + volumeUuid + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); throw new 
CloudRuntimeException("Dummy plugin does not support this feature"); } @@ -123,6 +126,7 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { backup.setAccountId(vm.getAccountId()); backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); + backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); return backupDao.persist(backup) != null; } diff --git a/plugins/backup/nas/pom.xml b/plugins/backup/nas/pom.xml new file mode 100644 index 00000000000..096bf45c67e --- /dev/null +++ b/plugins/backup/nas/pom.xml @@ -0,0 +1,54 @@ + + + 4.0.0 + cloud-plugin-backup-nas + Apache CloudStack Plugin - KVM NAS Backup and Recovery Plugin + + cloudstack-plugins + org.apache.cloudstack + 4.20.0.0-SNAPSHOT + ../../pom.xml + + + + org.apache.cloudstack + cloud-plugin-hypervisor-kvm + ${project.version} + + + org.apache.commons + commons-lang3 + ${cs.commons-lang3.version} + + + com.fasterxml.jackson.core + jackson-databind + ${cs.jackson.version} + + + com.github.tomakehurst + wiremock-standalone + ${cs.wiremock.version} + test + + + diff --git a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java new file mode 100644 index 00000000000..4a6725abdca --- /dev/null +++ b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java @@ -0,0 +1,442 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import com.cloud.agent.AgentManager; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.ScopeType; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.backup.dao.BackupRepositoryDao; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.collections.CollectionUtils; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import javax.inject.Inject; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; 
+import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.HashMap; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; +import java.util.stream.Collectors; + +public class NASBackupProvider extends AdapterBase implements BackupProvider, Configurable { + private static final Logger LOG = LogManager.getLogger(NASBackupProvider.class); + + @Inject + private BackupDao backupDao; + + @Inject + private BackupRepositoryDao backupRepositoryDao; + + @Inject + private BackupOfferingDao backupOfferingDao; + + @Inject + private HostDao hostDao; + + @Inject + private ClusterDao clusterDao; + + @Inject + private VolumeDao volumeDao; + + @Inject + private StoragePoolHostDao storagePoolHostDao; + + @Inject + private VMInstanceDao vmInstanceDao; + + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + + @Inject + private AgentManager agentManager; + + protected Host getLastVMHypervisorHost(VirtualMachine vm) { + Long hostId = vm.getLastHostId(); + if (hostId == null) { + LOG.debug("Cannot find last host for vm. 
This should never happen, please check your database."); + return null; + } + Host host = hostDao.findById(hostId); + + if (host.getStatus() == Status.Up) { + return host; + } else { + // Try to find any Up host in the same cluster + for (final Host hostInCluster : hostDao.findHypervisorHostInCluster(host.getClusterId())) { + if (hostInCluster.getStatus() == Status.Up) { + LOG.debug("Found Host " + hostInCluster.getName()); + return hostInCluster; + } + } + } + // Try to find any Host in the zone + for (final HostVO hostInZone : hostDao.listByDataCenterIdAndHypervisorType(host.getDataCenterId(), Hypervisor.HypervisorType.KVM)) { + if (hostInZone.getStatus() == Status.Up) { + LOG.debug("Found Host " + hostInZone.getName()); + return hostInZone; + } + } + return null; + } + + protected Host getVMHypervisorHost(VirtualMachine vm) { + Long hostId = vm.getHostId(); + if (hostId == null && VirtualMachine.State.Running.equals(vm.getState())) { + throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for %s. 
Make sure the virtual machine is running", vm.getName())); + } + if (VirtualMachine.State.Stopped.equals(vm.getState())) { + hostId = vm.getLastHostId(); + } + if (hostId == null) { + throw new CloudRuntimeException(String.format("Unable to find the hypervisor host for stopped VM: %s", vm)); + } + final Host host = hostDao.findById(hostId); + if (host == null || !Status.Up.equals(host.getStatus()) || !Hypervisor.HypervisorType.KVM.equals(host.getHypervisorType())) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } + return host; + } + + @Override + public boolean takeBackup(final VirtualMachine vm) { + final Host host = getVMHypervisorHost(vm); + + final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException("No valid backup repository found for the VM, please check the attached backup offering"); + } + + final Date creationDate = new Date(); + final String backupPath = String.format("%s/%s", vm.getInstanceName(), + new SimpleDateFormat("yyyy.MM.dd.HH.mm.ss").format(creationDate)); + + BackupVO backupVO = createBackupObject(vm, backupPath); + TakeBackupCommand command = new TakeBackupCommand(vm.getInstanceName(), backupPath); + command.setBackupRepoType(backupRepository.getType()); + command.setBackupRepoAddress(backupRepository.getAddress()); + command.setMountOptions(backupRepository.getMountOptions()); + + if (VirtualMachine.State.Stopped.equals(vm.getState())) { + List vmVolumes = volumeDao.findByInstance(vm.getId()); + List volumePaths = getVolumePaths(vmVolumes); + command.setVolumePaths(volumePaths); + } + + BackupAnswer answer = null; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), command); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + 
throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + } + + if (answer != null && answer.getResult()) { + backupVO.setDate(new Date()); + backupVO.setSize(answer.getSize()); + backupVO.setStatus(Backup.Status.BackedUp); + backupVO.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); + return backupDao.update(backupVO.getId(), backupVO); + } else { + backupVO.setStatus(Backup.Status.Failed); + backupDao.remove(backupVO.getId()); + } + return Objects.nonNull(answer) && answer.getResult(); + } + + private BackupVO createBackupObject(VirtualMachine vm, String backupPath) { + BackupVO backup = new BackupVO(); + backup.setVmId(vm.getId()); + backup.setExternalId(backupPath); + backup.setType("FULL"); + backup.setDate(new Date()); + long virtualSize = 0L; + for (final Volume volume: volumeDao.findByInstance(vm.getId())) { + if (Volume.State.Ready.equals(volume.getState())) { + virtualSize += volume.getSize(); + } + } + backup.setProtectedSize(Long.valueOf(virtualSize)); + backup.setStatus(Backup.Status.BackingUp); + backup.setBackupOfferingId(vm.getBackupOfferingId()); + backup.setAccountId(vm.getAccountId()); + backup.setDomainId(vm.getDomainId()); + backup.setZoneId(vm.getDataCenterId()); + return backupDao.persist(backup); + } + + @Override + public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { + List backedVolumes = backup.getBackedUpVolumes(); + List volumes = backedVolumes.stream().map(volume -> volumeDao.findByUuid(volume.getUuid())).collect(Collectors.toList()); + + LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm.getUuid(), backup.getUuid()); + BackupRepository backupRepository = getBackupRepository(vm, backup); + + final Host host = getLastVMHypervisorHost(vm); + RestoreBackupCommand restoreCommand = new RestoreBackupCommand(); + restoreCommand.setBackupPath(backup.getExternalId()); + 
restoreCommand.setBackupRepoType(backupRepository.getType()); + restoreCommand.setBackupRepoAddress(backupRepository.getAddress()); + restoreCommand.setVmName(vm.getName()); + restoreCommand.setVolumePaths(getVolumePaths(volumes)); + restoreCommand.setVmExists(vm.getRemoved() == null); + restoreCommand.setVmState(vm.getState()); + + BackupAnswer answer = null; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), restoreCommand); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + } + return answer.getResult(); + } + + private List getVolumePaths(List volumes) { + List volumePaths = new ArrayList<>(); + for (VolumeVO volume : volumes) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(volume.getPoolId()); + if (Objects.isNull(storagePool)) { + throw new CloudRuntimeException("Unable to find storage pool associated to the volume"); + } + String volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid()); + if (ScopeType.HOST.equals(storagePool.getScope())) { + volumePathPrefix = storagePool.getPath(); + } + volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath())); + } + return volumePaths; + } + + @Override + public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + final VolumeVO volume = volumeDao.findByUuid(volumeUuid); + final VirtualMachine backupSourceVm = vmInstanceDao.findById(backup.getVmId()); + final StoragePoolHostVO dataStore = storagePoolHostDao.findByUuid(dataStoreUuid); + final HostVO hostVO = hostDao.findByIp(hostIp); + + Optional matchingVolume = getBackedUpVolumeInfo(backupSourceVm.getBackupVolumeList(), volumeUuid); + Long backedUpVolumeSize = matchingVolume.isPresent() ? 
matchingVolume.get().getSize() : 0L; + + LOG.debug("Restoring vm volume" + volumeUuid + "from backup " + backup.getUuid() + " on the NAS Backup Provider"); + BackupRepository backupRepository = getBackupRepository(backupSourceVm, backup); + + VolumeVO restoredVolume = new VolumeVO(Volume.Type.DATADISK, null, backup.getZoneId(), + backup.getDomainId(), backup.getAccountId(), 0, null, + backup.getSize(), null, null, null); + String volumeUUID = UUID.randomUUID().toString(); + restoredVolume.setName("RestoredVol-"+volume.getName()); + restoredVolume.setProvisioningType(volume.getProvisioningType()); + restoredVolume.setUpdated(new Date()); + restoredVolume.setUuid(volumeUUID); + restoredVolume.setRemoved(null); + restoredVolume.setDisplayVolume(true); + restoredVolume.setPoolId(dataStore.getPoolId()); + restoredVolume.setPath(restoredVolume.getUuid()); + restoredVolume.setState(Volume.State.Copying); + restoredVolume.setSize(backedUpVolumeSize); + restoredVolume.setDiskOfferingId(volume.getDiskOfferingId()); + + RestoreBackupCommand restoreCommand = new RestoreBackupCommand(); + restoreCommand.setBackupPath(backup.getExternalId()); + restoreCommand.setBackupRepoType(backupRepository.getType()); + restoreCommand.setBackupRepoAddress(backupRepository.getAddress()); + restoreCommand.setVmName(vmNameAndState.first()); + restoreCommand.setVolumePaths(Collections.singletonList(String.format("%s/%s", dataStore.getLocalPath(), volumeUUID))); + restoreCommand.setDiskType(volume.getVolumeType().name().toLowerCase(Locale.ROOT)); + restoreCommand.setVmExists(null); + restoreCommand.setVmState(vmNameAndState.second()); + restoreCommand.setRestoreVolumeUUID(volumeUuid); + + BackupAnswer answer = null; + try { + answer = (BackupAnswer) agentManager.send(hostVO.getId(), restoreCommand); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new 
CloudRuntimeException("Operation to initiate backup timed out, please try again"); + } + + if (answer.getResult()) { + try { + volumeDao.persist(restoredVolume); + } catch (Exception e) { + throw new CloudRuntimeException("Unable to create restored volume due to: " + e); + } + } + + return new Pair<>(answer.getResult(), answer.getDetails()); + } + + private BackupRepository getBackupRepository(VirtualMachine vm, Backup backup) { + BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId()); + final String errorMessage = "No valid backup repository found for the VM, please check the attached backup offering"; + if (backupRepository == null) { + logger.warn(errorMessage + "Re-attempting with the backup offering associated with the backup"); + } + backupRepository = backupRepositoryDao.findByBackupOfferingId(backup.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException(errorMessage); + } + return backupRepository; + } + + private Optional getBackedUpVolumeInfo(List backedUpVolumes, String volumeUuid) { + return backedUpVolumes.stream() + .filter(v -> v.getUuid().equals(volumeUuid)) + .findFirst(); + } + + @Override + public boolean deleteBackup(Backup backup, boolean forced) { + final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(backup.getBackupOfferingId()); + if (backupRepository == null) { + throw new CloudRuntimeException("No valid backup repository found for the VM, please check the attached backup offering"); + } + + final VirtualMachine vm = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); + final Host host = getLastVMHypervisorHost(vm); + + DeleteBackupCommand command = new DeleteBackupCommand(backup.getExternalId(), backupRepository.getType(), + backupRepository.getAddress(), backupRepository.getMountOptions()); + + BackupAnswer answer = null; + try { + answer = (BackupAnswer) agentManager.send(host.getId(), command); + } catch 
(AgentUnavailableException e) { + throw new CloudRuntimeException("Unable to contact backend control plane to initiate backup"); + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Operation to initiate backup timed out, please try again"); + } + + if (answer != null && answer.getResult()) { + return backupDao.remove(backup.getId()); + } + + return false; + } + + @Override + public Map getBackupMetrics(Long zoneId, List vms) { + final Map metrics = new HashMap<>(); + if (CollectionUtils.isEmpty(vms)) { + LOG.warn("Unable to get VM Backup Metrics because the list of VMs is empty."); + return metrics; + } + + for (final VirtualMachine vm : vms) { + Long vmBackupSize = 0L; + Long vmBackupProtectedSize = 0L; + for (final Backup backup: backupDao.listByVmId(null, vm.getId())) { + vmBackupSize += backup.getSize(); + vmBackupProtectedSize += backup.getProtectedSize(); + } + Backup.Metric vmBackupMetric = new Backup.Metric(vmBackupSize,vmBackupProtectedSize); + LOG.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), + vm.getInstanceName(), vmBackupMetric.getBackupSize(), vmBackupMetric.getDataSize())); + metrics.put(vm, vmBackupMetric); + } + return metrics; + } + + @Override + public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { + return Hypervisor.HypervisorType.KVM.equals(vm.getHypervisorType()); + } + + @Override + public boolean removeVMFromBackupOffering(VirtualMachine vm) { + return true; + } + + @Override + public boolean willDeleteBackupsOnOfferingRemoval() { + return false; + } + + @Override + public void syncBackups(VirtualMachine vm, Backup.Metric metric) { + // TODO: check and sum/return backups metrics on per VM basis + } + + @Override + public List listBackupOfferings(Long zoneId) { + final List repositories = backupRepositoryDao.listByZoneAndProvider(zoneId, getName()); + final List offerings = new ArrayList<>(); + for (final 
BackupRepository repository : repositories) { + offerings.add(new NasBackupOffering(repository.getName(), repository.getUuid())); + } + return offerings; + } + + @Override + public boolean isValidProviderOffering(Long zoneId, String uuid) { + return true; + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + }; + } + + @Override + public String getName() { + return "nas"; + } + + @Override + public String getDescription() { + return "NAS Backup Plugin"; + } + + @Override + public String getConfigComponentName() { + return BackupService.class.getSimpleName(); + } +} diff --git a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NasBackupOffering.java b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NasBackupOffering.java new file mode 100644 index 00000000000..91df74166e5 --- /dev/null +++ b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NasBackupOffering.java @@ -0,0 +1,75 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import java.util.Date; + +public class NasBackupOffering implements BackupOffering { + + private String name; + private String uid; + + public NasBackupOffering(String name, String uid) { + this.name = name; + this.uid = uid; + } + + @Override + public String getExternalId() { + return uid; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getDescription() { + return "NAS Backup Offering (Repository)"; + } + + @Override + public long getZoneId() { + return -1; + } + + @Override + public boolean isUserDrivenBackupAllowed() { + return true; + } + + @Override + public String getProvider() { + return "nas"; + } + + @Override + public Date getCreated() { + return null; + } + + @Override + public String getUuid() { + return uid; + } + + @Override + public long getId() { + return -1; + } +} diff --git a/plugins/backup/nas/src/main/resources/META-INF/cloudstack/nas/module.properties b/plugins/backup/nas/src/main/resources/META-INF/cloudstack/nas/module.properties new file mode 100644 index 00000000000..2e101ef0231 --- /dev/null +++ b/plugins/backup/nas/src/main/resources/META-INF/cloudstack/nas/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +name=nas +parent=backup diff --git a/plugins/backup/nas/src/main/resources/META-INF/cloudstack/nas/spring-backup-nas-context.xml b/plugins/backup/nas/src/main/resources/META-INF/cloudstack/nas/spring-backup-nas-context.xml new file mode 100644 index 00000000000..635ca66fbde --- /dev/null +++ b/plugins/backup/nas/src/main/resources/META-INF/cloudstack/nas/spring-backup-nas-context.xml @@ -0,0 +1,26 @@ + + + + + + + diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java index e375b42aeb5..0e87ad33887 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java @@ -372,7 +372,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid } @Override - public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid) { + public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { String networkerServer; VolumeVO volume = volumeDao.findByUuid(volumeUuid); VMInstanceVO backupSourceVm = vmInstanceDao.findById(backup.getVmId()); @@ -512,6 +512,7 @@ public class NetworkerBackupProvider extends AdapterBase implements BackupProvid LOG.info ("EMC Networker finished backup job for vm " + vm.getName() + " with saveset Time: " + saveTime); BackupVO backup = getClient(vm.getDataCenterId()).registerBackupForVm(vm, backupJobStart, saveTime); if (backup != null) { + backup.setBackedUpVolumes(BackupManagerImpl.createVolumeInfoFromVolumes(volumeDao.findByInstance(vm.getId()))); backupDao.persist(backup); return true; } else { diff --git 
a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java index 0e453739018..4750e3264aa 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java @@ -291,7 +291,7 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, } @Override - public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid) { + public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid, Pair vmNameAndState) { final Long zoneId = backup.getZoneId(); final String restorePointId = backup.getExternalId(); return getClient(zoneId).restoreVMToDifferentLocation(restorePointId, hostIp, dataStoreUuid); diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java index 8a193c1ce80..d911736090c 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java @@ -346,7 +346,7 @@ public class VeeamClient { String type = pair.second(); String path = url.replace(apiURI.toString(), ""); if (type.equals("RestoreSession")) { - return checkIfRestoreSessionFinished(type, path); + checkIfRestoreSessionFinished(type, path); } } return true; @@ -362,17 +362,29 @@ public class VeeamClient { return false; } - protected boolean checkIfRestoreSessionFinished(String type, String path) throws IOException { - for (int j = 0; j < this.restoreTimeout; j++) { + + /** + * Checks the status of the restore session. Checked states are "Success" and "Failure".
    + * There is also a timeout defined in the global configuration, backup.plugin.veeam.restore.timeout,
    + * that is used to wait for the restore to complete before throwing a {@link CloudRuntimeException}. + */ + protected void checkIfRestoreSessionFinished(String type, String path) throws IOException { + for (int j = 0; j < restoreTimeout; j++) { HttpResponse relatedResponse = get(path); RestoreSession session = parseRestoreSessionResponse(relatedResponse); if (session.getResult().equals("Success")) { - return true; + return; } + if (session.getResult().equalsIgnoreCase("Failed")) { String sessionUid = session.getUid(); + logger.error(String.format("Failed to restore backup [%s] of VM [%s] due to [%s].", + sessionUid, session.getVmDisplayName(), + getRestoreVmErrorDescription(StringUtils.substringAfterLast(sessionUid, ":")))); throw new CloudRuntimeException(String.format("Restore job [%s] failed.", sessionUid)); } + logger.debug(String.format("Waiting %s seconds, out of a total of %s seconds, for the restore backup process to finish.", j, restoreTimeout)); + try { Thread.sleep(1000); } catch (InterruptedException ignored) { @@ -931,6 +943,29 @@ public class VeeamClient { return new Pair<>(result.first(), restoreLocation); } + /** + * Tries to retrieve the error's description of the Veeam restore task that resulted in an error. + * @param uid Session uid in Veeam of the restore process; + * @return the description found in Veeam about the cause of error in the restore process. 
+ */ + protected String getRestoreVmErrorDescription(String uid) { + logger.debug(String.format("Trying to find the cause of error in the restore process [%s].", uid)); + List cmds = Arrays.asList( + String.format("$restoreUid = '%s'", uid), + "$restore = Get-VBRRestoreSession -Id $restoreUid", + "if ($restore) {", + "Write-Output $restore.Description", + "} else {", + "Write-Output 'Cannot find restore session with provided uid $restoreUid'", + "}" + ); + Pair result = executePowerShellCommands(cmds); + if (result != null && result.first()) { + return result.second(); + } + return String.format("Failed to get the description of the failed restore session [%s]. Please contact an administrator.", uid); + } + private boolean isLegacyServer() { return this.veeamServerVersion != null && (this.veeamServerVersion > 0 && this.veeamServerVersion < 11); } diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java index b00455968c6..63d6896bb85 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java @@ -38,7 +38,7 @@ import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.veeam.api.RestoreSession; import org.apache.http.HttpResponse; -import org.apache.logging.log4j.core.Logger; +import org.apache.logging.log4j.Logger; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -163,7 +163,7 @@ public class VeeamClientTest { Mockito.when(mockClient.get(Mockito.anyString())).thenReturn(httpResponse); Mockito.when(mockClient.parseRestoreSessionResponse(httpResponse)).thenReturn(restoreSession); Mockito.when(restoreSession.getResult()).thenReturn("No Success"); - 
Mockito.when(mockClient.checkIfRestoreSessionFinished(Mockito.eq("RestoreTest"), Mockito.eq("any"))).thenCallRealMethod(); + Mockito.doCallRealMethod().when(mockClient).checkIfRestoreSessionFinished(Mockito.eq("RestoreTest"), Mockito.eq("any")); mockClient.checkIfRestoreSessionFinished("RestoreTest", "any"); fail(); } catch (Exception e) { @@ -172,6 +172,42 @@ public class VeeamClientTest { Mockito.verify(mockClient, times(10)).get(Mockito.anyString()); } + @Test + public void getRestoreVmErrorDescriptionTestFindErrorDescription() { + Pair response = new Pair<>(true, "Example of error description found in Veeam."); + Mockito.when(mockClient.getRestoreVmErrorDescription("uuid")).thenCallRealMethod(); + Mockito.when(mockClient.executePowerShellCommands(Mockito.any())).thenReturn(response); + String result = mockClient.getRestoreVmErrorDescription("uuid"); + Assert.assertEquals("Example of error description found in Veeam.", result); + } + + @Test + public void getRestoreVmErrorDescriptionTestNotFindErrorDescription() { + Pair response = new Pair<>(true, "Cannot find restore session with provided uid uuid"); + Mockito.when(mockClient.getRestoreVmErrorDescription("uuid")).thenCallRealMethod(); + Mockito.when(mockClient.executePowerShellCommands(Mockito.any())).thenReturn(response); + String result = mockClient.getRestoreVmErrorDescription("uuid"); + Assert.assertEquals("Cannot find restore session with provided uid uuid", result); + } + + @Test + public void getRestoreVmErrorDescriptionTestWhenPowerShellOutputIsNull() { + Mockito.when(mockClient.getRestoreVmErrorDescription("uuid")).thenCallRealMethod(); + Mockito.when(mockClient.executePowerShellCommands(Mockito.any())).thenReturn(null); + String result = mockClient.getRestoreVmErrorDescription("uuid"); + Assert.assertEquals("Failed to get the description of the failed restore session [uuid]. 
Please contact an administrator.", result); + } + + @Test + public void getRestoreVmErrorDescriptionTestWhenPowerShellOutputIsFalse() { + Pair response = new Pair<>(false, null); + Mockito.when(mockClient.getRestoreVmErrorDescription("uuid")).thenCallRealMethod(); + Mockito.when(mockClient.executePowerShellCommands(Mockito.any())).thenReturn(response); + String result = mockClient.getRestoreVmErrorDescription("uuid"); + Assert.assertEquals("Failed to get the description of the failed restore session [uuid]. Please contact an administrator.", result); + } + + private void verifyBackupMetrics(Map metrics) { Assert.assertEquals(2, metrics.size()); diff --git a/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCAProvider.java b/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCAProvider.java index d7001ce941a..25c45ed2a10 100644 --- a/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCAProvider.java +++ b/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCAProvider.java @@ -35,9 +35,11 @@ import java.security.Security; import java.security.SignatureException; import java.security.UnrecoverableKeyException; import java.security.cert.CertificateException; +import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; import java.security.spec.InvalidKeySpecException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Enumeration; import java.util.HashSet; @@ -53,7 +55,6 @@ import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; import javax.xml.bind.DatatypeConverter; -import com.cloud.configuration.Config; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.framework.ca.CAProvider; import org.apache.cloudstack.framework.ca.Certificate; @@ -62,6 +63,8 @@ import org.apache.cloudstack.framework.config.Configurable; import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.utils.security.CertUtils; import org.apache.cloudstack.utils.security.KeyStoreUtils; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.bouncycastle.asn1.pkcs.Attribute; import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; import org.bouncycastle.asn1.x509.Extension; @@ -75,11 +78,11 @@ import org.bouncycastle.util.io.pem.PemObject; import org.bouncycastle.util.io.pem.PemReader; import com.cloud.certificate.dao.CrlDao; +import com.cloud.configuration.Config; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; -import org.apache.commons.lang3.StringUtils; public final class RootCAProvider extends AdapterBase implements CAProvider, Configurable { @@ -130,6 +133,8 @@ public final class RootCAProvider extends AdapterBase implements CAProvider, Con "true", "When set to true, it will allow expired client certificate during SSL handshake.", true); + private static String managementCertificateCustomSAN; + /////////////////////////////////////////////////////////// /////////////// Root CA Private Methods /////////////////// @@ -371,8 +376,11 @@ public final class RootCAProvider extends AdapterBase implements CAProvider, Con List nicIps = NetUtils.getAllDefaultNicIps(); addConfiguredManagementIp(nicIps); nicIps = new ArrayList<>(new HashSet<>(nicIps)); + List domainNames = new ArrayList<>(); + domainNames.add(NetUtils.getHostName()); + domainNames.add(CAManager.CertManagementCustomSubjectAlternativeName.value()); - final Certificate serverCertificate = issueCertificate(Collections.singletonList(NetUtils.getHostName()), nicIps, getCaValidityDays()); + final Certificate serverCertificate = issueCertificate(domainNames, nicIps, getCaValidityDays()); if (serverCertificate == null || 
serverCertificate.getPrivateKey() == null) { throw new CloudRuntimeException("Failed to generate management server certificate and load management server keystore"); @@ -431,6 +439,7 @@ public final class RootCAProvider extends AdapterBase implements CAProvider, Con @Override public boolean start() { + managementCertificateCustomSAN = CAManager.CertManagementCustomSubjectAlternativeName.value(); return loadRootCAKeyPair() && loadRootCAKeyPair() && loadManagementKeyStore(); } @@ -485,4 +494,26 @@ public final class RootCAProvider extends AdapterBase implements CAProvider, Con public String getDescription() { return "CloudStack's Root CA provider plugin"; } + + @Override + public boolean isManagementCertificate(java.security.cert.Certificate certificate) throws CertificateParsingException { + if (!(certificate instanceof X509Certificate)) { + return false; + } + X509Certificate x509Certificate = (X509Certificate) certificate; + + // Check for alternative names + Collection> altNames = x509Certificate.getSubjectAlternativeNames(); + if (CollectionUtils.isEmpty(altNames)) { + return false; + } + for (List altName : altNames) { + int type = (Integer) altName.get(0); + String name = (String) altName.get(1); + if (type == GeneralName.dNSName && managementCertificateCustomSAN.equals(name)) { + return true; + } + } + return false; + } } diff --git a/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java b/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java index 15514b91c78..8311f4d45ab 100644 --- a/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java +++ b/plugins/ca/root-ca/src/test/java/org/apache/cloudstack/ca/provider/RootCAProviderTest.java @@ -26,8 +26,13 @@ import java.security.NoSuchAlgorithmException; import java.security.NoSuchProviderException; import java.security.SignatureException; import java.security.cert.CertificateException; +import 
java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.UUID; import javax.net.ssl.SSLEngine; @@ -35,15 +40,16 @@ import org.apache.cloudstack.framework.ca.Certificate; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.utils.security.CertUtils; import org.apache.cloudstack.utils.security.SSLUtils; +import org.bouncycastle.asn1.x509.GeneralName; import org.joda.time.DateTime; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; - -import org.mockito.junit.MockitoJUnitRunner; import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; @RunWith(MockitoJUnitRunner.class) @@ -150,4 +156,56 @@ public class RootCAProviderTest { Assert.assertEquals(provider.getProviderName(), "root"); } + @Test + public void testIsManagementCertificateNotX509() { + try { + Assert.assertFalse(provider.isManagementCertificate(Mockito.mock(java.security.cert.Certificate.class))); + } catch (CertificateParsingException e) { + Assert.fail(String.format("Exception occurred: %s", e.getMessage())); + } + } + + @Test + public void testIsManagementCertificateNoAltNames() { + try { + X509Certificate certificate = Mockito.mock(X509Certificate.class); + Mockito.when(certificate.getSubjectAlternativeNames()).thenReturn(new ArrayList<>()); + Assert.assertFalse(provider.isManagementCertificate(certificate)); + } catch (CertificateParsingException e) { + Assert.fail(String.format("Exception occurred: %s", e.getMessage())); + } + } + + @Test + public void testIsManagementCertificateNoMatch() { + ReflectionTestUtils.setField(provider, "managementCertificateCustomSAN", "cloudstack"); + try { + X509Certificate certificate = Mockito.mock(X509Certificate.class); + List> 
altNames = new ArrayList<>(); + altNames.add(List.of(GeneralName.dNSName, UUID.randomUUID().toString())); + altNames.add(List.of(GeneralName.dNSName, UUID.randomUUID().toString())); + Collection> collection = new ArrayList<>(altNames); + Mockito.when(certificate.getSubjectAlternativeNames()).thenReturn(collection); + Assert.assertFalse(provider.isManagementCertificate(certificate)); + } catch (CertificateParsingException e) { + Assert.fail(String.format("Exception occurred: %s", e.getMessage())); + } + } + + @Test + public void testIsManagementCertificateMatch() { + String customSAN = "cloudstack"; + ReflectionTestUtils.setField(provider, "managementCertificateCustomSAN", customSAN); + try { + X509Certificate certificate = Mockito.mock(X509Certificate.class); + List> altNames = new ArrayList<>(); + altNames.add(List.of(GeneralName.dNSName, customSAN)); + altNames.add(List.of(GeneralName.dNSName, UUID.randomUUID().toString())); + Collection> collection = new ArrayList<>(altNames); + Mockito.when(certificate.getSubjectAlternativeNames()).thenReturn(collection); + Assert.assertTrue(provider.isManagementCertificate(certificate)); + } catch (CertificateParsingException e) { + Assert.fail(String.format("Exception occurred: %s", e.getMessage())); + } + } } diff --git a/plugins/database/quota/pom.xml b/plugins/database/quota/pom.xml index 9dada4128a5..b574b263020 100644 --- a/plugins/database/quota/pom.xml +++ b/plugins/database/quota/pom.xml @@ -62,5 +62,10 @@ joda-time ${cs.joda-time.version} + + org.apache.cloudstack + cloud-plugin-api-discovery + ${project.version} + diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaPresetVariablesListCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaPresetVariablesListCmd.java new file mode 100644 index 00000000000..8de16dd2741 --- /dev/null +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaPresetVariablesListCmd.java @@ -0,0 
+1,66 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. +package org.apache.cloudstack.api.command; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.QuotaPresetVariablesItemResponse; +import org.apache.cloudstack.api.response.QuotaResponseBuilder; +import org.apache.cloudstack.quota.constant.QuotaTypes; + +import javax.inject.Inject; +import java.util.List; + +@APICommand(name = "quotaPresetVariablesList", responseObject = QuotaPresetVariablesItemResponse.class, description = "List the preset variables available for using in the " + + "Quota tariff activation rules given the usage type.", since = "4.20", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class QuotaPresetVariablesListCmd extends BaseCmd { + + @Inject + QuotaResponseBuilder quotaResponseBuilder; + + @Parameter(name = ApiConstants.USAGE_TYPE, type = CommandType.INTEGER, required = true, description = "The usage type for 
which the preset variables will be retrieved.") + private Integer quotaType; + + @Override + public void execute() { + List responses = quotaResponseBuilder.listQuotaPresetVariables(this); + ListResponse listResponse = new ListResponse<>(); + listResponse.setResponses(responses); + listResponse.setResponseName(getCommandName()); + setResponseObject(listResponse); + } + + public QuotaTypes getQuotaType() { + QuotaTypes quotaTypes = QuotaTypes.getQuotaType(quotaType); + + if (quotaTypes == null) { + throw new InvalidParameterValueException(String.format("Usage type not found for value [%s].", quotaType)); + } + + return quotaTypes; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffCreateCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffCreateCmd.java index b9406754b31..f1fd4b4afe1 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffCreateCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffCreateCmd.java @@ -54,10 +54,7 @@ public class QuotaTariffCreateCmd extends BaseCmd { @Parameter(name = "value", type = CommandType.DOUBLE, required = true, description = "The quota tariff value of the resource as per the default unit.") private Double value; - @Parameter(name = ApiConstants.ACTIVATION_RULE, type = CommandType.STRING, description = "Quota tariff's activation rule. It can receive a JS script that results in either " + - "a boolean or a numeric value: if it results in a boolean value, the tariff value will be applied according to the result; if it results in a numeric value, the " + - "numeric value will be applied; if the result is neither a boolean nor a numeric value, the tariff will not be applied. 
If the rule is not informed, the tariff " + - "value will be applied.", length = 65535) + @Parameter(name = ApiConstants.ACTIVATION_RULE, type = CommandType.STRING, description = ApiConstants.PARAMETER_DESCRIPTION_ACTIVATION_RULE, length = 65535) private String activationRule; @Parameter(name = ApiConstants.START_DATE, type = CommandType.DATE, description = "The effective start date on/after which the quota tariff is effective. Inform null to " + @@ -68,6 +65,9 @@ public class QuotaTariffCreateCmd extends BaseCmd { ApiConstants.PARAMETER_DESCRIPTION_END_DATE_POSSIBLE_FORMATS) private Date endDate; + @Parameter(name = ApiConstants.POSITION, type = CommandType.INTEGER, description = "Position in the execution sequence for tariffs of the same type", since = "4.20.0.0") + private Integer position; + @Override public void execute() { CallContext.current().setEventDetails(String.format("Tariff: %s, description: %s, value: %s", getName(), getDescription(), getValue())); @@ -77,7 +77,7 @@ public class QuotaTariffCreateCmd extends BaseCmd { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create new quota tariff."); } - QuotaTariffResponse response = responseBuilder.createQuotaTariffResponse(result); + QuotaTariffResponse response = responseBuilder.createQuotaTariffResponse(result, true); response.setResponseName(getCommandName()); setResponseObject(response); } @@ -139,4 +139,13 @@ public class QuotaTariffCreateCmd extends BaseCmd { public ApiCommandResourceType getApiResourceType() { return ApiCommandResourceType.QuotaTariff; } + public Integer getPosition() { + return position; + } + + public void setPosition(Integer position) { + this.position = position; + } + + } diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java index b4e8c868e40..d054d545931 100644 --- 
a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java @@ -17,15 +17,18 @@ package org.apache.cloudstack.api.command; import com.cloud.user.Account; +import com.cloud.user.User; import com.cloud.utils.Pair; import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiArgValidator; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.api.response.QuotaTariffResponse; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.quota.vo.QuotaTariffVO; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @@ -59,20 +62,29 @@ public class QuotaTariffListCmd extends BaseListCmd { + "list all, including the removed ones. The default is false.", since = "4.18.0.0") private boolean listAll = false; - public QuotaTariffListCmd() { - super(); - } + @Parameter(name = ApiConstants.LIST_ONLY_REMOVED, type = CommandType.BOOLEAN, description = "If set to true, we will list only the removed tariffs." 
+ + " The default is false.") + private boolean listOnlyRemoved = false; + + @Parameter(name = ApiConstants.ID, type = CommandType.STRING, description = "The quota tariff's id.", validations = {ApiArgValidator.UuidString}) + private String id; @Override public void execute() { final Pair, Integer> result = _responseBuilder.listQuotaTariffPlans(this); + User user = CallContext.current().getCallingUser(); + boolean returnActivationRules = _responseBuilder.isUserAllowedToSeeActivationRules(user); + if (!returnActivationRules) { + logger.debug("User [{}] does not have permission to create or update quota tariffs, therefore we will not return the activation rules.", user.getUuid()); + } + final List responses = new ArrayList<>(); - logger.trace(String.format("Adding quota tariffs [%s] to response of API quotaTariffList.", ReflectionToStringBuilderUtils.reflectCollection(responses))); + logger.trace("Adding quota tariffs [{}] to response of API quotaTariffList.", ReflectionToStringBuilderUtils.reflectCollection(responses)); for (final QuotaTariffVO resource : result.first()) { - responses.add(_responseBuilder.createQuotaTariffResponse(resource)); + responses.add(_responseBuilder.createQuotaTariffResponse(resource, returnActivationRules)); } final ListResponse response = new ListResponse<>(); @@ -106,4 +118,15 @@ public class QuotaTariffListCmd extends BaseListCmd { return listAll; } + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public boolean isListOnlyRemoved() { + return listOnlyRemoved; + } } diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmd.java index 4fc1f08da88..b5766875507 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmd.java +++ 
b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmd.java @@ -63,12 +63,13 @@ public class QuotaTariffUpdateCmd extends BaseCmd { since = "4.18.0.0") private String description; - @Parameter(name = ApiConstants.ACTIVATION_RULE, type = CommandType.STRING, description = "Quota tariff's activation rule. It can receive a JS script that results in either " + - "a boolean or a numeric value: if it results in a boolean value, the tariff value will be applied according to the result; if it results in a numeric value, the " + - "numeric value will be applied; if the result is neither a boolean nor a numeric value, the tariff will not be applied. If the rule is not informed, the tariff " + - "value will be applied. Inform empty to remove the activation rule.", length = 65535, since = "4.18.0.0") + @Parameter(name = ApiConstants.ACTIVATION_RULE, type = CommandType.STRING, description = ApiConstants.PARAMETER_DESCRIPTION_ACTIVATION_RULE + + " Inform empty to remove the activation rule.", length = 65535, since = "4.18.0.0") private String activationRule; + @Parameter(name = ApiConstants.POSITION, type = CommandType.INTEGER, description = "Position in the execution sequence for tariffs of the same type", since = "4.20.0.0") + private Integer position; + public Integer getUsageType() { return usageType; } @@ -116,7 +117,7 @@ public class QuotaTariffUpdateCmd extends BaseCmd { if (result == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update quota tariff plan"); } - final QuotaTariffResponse response = _responseBuilder.createQuotaTariffResponse(result); + final QuotaTariffResponse response = _responseBuilder.createQuotaTariffResponse(result, true); response.setResponseName(getCommandName()); setResponseObject(response); } @@ -130,4 +131,13 @@ public class QuotaTariffUpdateCmd extends BaseCmd { public ApiCommandResourceType getApiResourceType() { return ApiCommandResourceType.QuotaTariff; } + + public Integer 
getPosition() { + return position; + } + + public void setPosition(Integer position) { + this.position = position; + } + } diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaPresetVariablesItemResponse.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaPresetVariablesItemResponse.java new file mode 100644 index 00000000000..a1b80fd94eb --- /dev/null +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaPresetVariablesItemResponse.java @@ -0,0 +1,47 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+package org.apache.cloudstack.api.response; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.BaseResponse; + +public class QuotaPresetVariablesItemResponse extends BaseResponse { + @SerializedName("variable") + @Param(description = "variable") + private String variable; + + @SerializedName("description") + @Param(description = "description") + private String description; + + public QuotaPresetVariablesItemResponse() { + super("variables"); + } + + public void setVariable(String variable) { + this.variable = variable; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } +} diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilder.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilder.java index 57aa04e00fa..c635551aeb5 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilder.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilder.java @@ -16,10 +16,12 @@ //under the License. 
package org.apache.cloudstack.api.response; +import com.cloud.user.User; import org.apache.cloudstack.api.command.QuotaBalanceCmd; import org.apache.cloudstack.api.command.QuotaConfigureEmailCmd; import org.apache.cloudstack.api.command.QuotaEmailTemplateListCmd; import org.apache.cloudstack.api.command.QuotaEmailTemplateUpdateCmd; +import org.apache.cloudstack.api.command.QuotaPresetVariablesListCmd; import org.apache.cloudstack.api.command.QuotaStatementCmd; import org.apache.cloudstack.api.command.QuotaTariffCreateCmd; import org.apache.cloudstack.api.command.QuotaTariffListCmd; @@ -40,7 +42,9 @@ public interface QuotaResponseBuilder { Pair, Integer> listQuotaTariffPlans(QuotaTariffListCmd cmd); - QuotaTariffResponse createQuotaTariffResponse(QuotaTariffVO configuration); + QuotaTariffResponse createQuotaTariffResponse(QuotaTariffVO quotaTariff, boolean returnActivationRule); + + boolean isUserAllowedToSeeActivationRules(User user); QuotaStatementResponse createQuotaStatementResponse(List quotaUsage); @@ -72,6 +76,13 @@ public interface QuotaResponseBuilder { boolean deleteQuotaTariff(String quotaTariffUuid); + /** + * Lists the preset variables for the usage type informed in the command. + * @param cmd used to retrieve the Quota usage type parameter. + * @return the response consisting of a {@link List} of the preset variables and their descriptions. 
+ */ + List listQuotaPresetVariables(QuotaPresetVariablesListCmd cmd); + Pair configureQuotaEmail(QuotaConfigureEmailCmd cmd); QuotaConfigureEmailResponse createQuotaConfigureEmailResponse(QuotaEmailConfigurationVO quotaEmailConfigurationVO, Double minBalance, long accountId); diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java index 94f821828ab..1c486759e43 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java @@ -16,11 +16,15 @@ //under the License. package org.apache.cloudstack.api.response; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.lang.reflect.ParameterizedType; import java.math.BigDecimal; import java.math.RoundingMode; import java.time.LocalDate; import java.time.ZoneId; import java.util.ArrayList; +import java.util.Arrays; import java.util.Calendar; import java.util.Collection; import java.util.Collections; @@ -31,6 +35,7 @@ import java.util.Iterator; import java.util.List; import java.util.ListIterator; import java.util.function.Consumer; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -41,15 +46,22 @@ import org.apache.cloudstack.api.command.QuotaBalanceCmd; import org.apache.cloudstack.api.command.QuotaConfigureEmailCmd; import org.apache.cloudstack.api.command.QuotaEmailTemplateListCmd; import org.apache.cloudstack.api.command.QuotaEmailTemplateUpdateCmd; +import org.apache.cloudstack.api.command.QuotaPresetVariablesListCmd; import org.apache.cloudstack.api.command.QuotaStatementCmd; import org.apache.cloudstack.api.command.QuotaTariffCreateCmd; import org.apache.cloudstack.api.command.QuotaTariffListCmd; import 
org.apache.cloudstack.api.command.QuotaTariffUpdateCmd; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.discovery.ApiDiscoveryService; import org.apache.cloudstack.quota.QuotaManager; import org.apache.cloudstack.quota.QuotaManagerImpl; import org.apache.cloudstack.quota.QuotaService; import org.apache.cloudstack.quota.QuotaStatement; +import org.apache.cloudstack.quota.activationrule.presetvariables.ComputingResources; +import org.apache.cloudstack.quota.activationrule.presetvariables.GenericPresetVariable; +import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariableDefinition; +import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariables; +import org.apache.cloudstack.quota.activationrule.presetvariables.Value; import org.apache.cloudstack.quota.constant.QuotaConfig; import org.apache.cloudstack.quota.constant.QuotaTypes; import org.apache.cloudstack.quota.dao.QuotaAccountDao; @@ -67,6 +79,9 @@ import org.apache.cloudstack.quota.vo.QuotaEmailTemplatesVO; import org.apache.cloudstack.quota.vo.QuotaTariffVO; import org.apache.cloudstack.quota.vo.QuotaUsageVO; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.reflect.FieldUtils; +import org.apache.commons.lang3.ObjectUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; @@ -119,8 +134,13 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { @Inject private QuotaEmailConfigurationDao quotaEmailConfigurationDao; + private final Class[] assignableClasses = {GenericPresetVariable.class, ComputingResources.class}; + + @Inject + private ApiDiscoveryService apiDiscoveryService; + @Override - public QuotaTariffResponse createQuotaTariffResponse(QuotaTariffVO tariff) { + public QuotaTariffResponse 
createQuotaTariffResponse(QuotaTariffVO tariff, boolean returnActivationRule) { final QuotaTariffResponse response = new QuotaTariffResponse(); response.setUsageType(tariff.getUsageType()); response.setUsageName(tariff.getUsageName()); @@ -130,12 +150,15 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { response.setEffectiveOn(tariff.getEffectiveOn()); response.setUsageTypeDescription(tariff.getUsageTypeDescription()); response.setCurrency(QuotaConfig.QuotaCurrencySymbol.value()); - response.setActivationRule(tariff.getActivationRule()); response.setName(tariff.getName()); response.setEndDate(tariff.getEndDate()); response.setDescription(tariff.getDescription()); response.setId(tariff.getUuid()); response.setRemoved(tariff.getRemoved()); + response.setPosition(tariff.getPosition()); + if (returnActivationRule) { + response.setActivationRule(tariff.getActivationRule()); + } return response; } @@ -211,6 +234,11 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { } } + public boolean isUserAllowedToSeeActivationRules(User user) { + List apiList = (List) apiDiscoveryService.listApis(user, null).getResponses(); + return apiList.stream().anyMatch(response -> StringUtils.equalsAny(response.getName(), "quotaTariffCreate", "quotaTariffUpdate")); + } + @Override public QuotaBalanceResponse createQuotaBalanceResponse(List quotaBalance, Date startDate, Date endDate) { if (quotaBalance == null || quotaBalance.isEmpty()) { @@ -383,11 +411,14 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { boolean listAll = cmd.isListAll(); Long startIndex = cmd.getStartIndex(); Long pageSize = cmd.getPageSizeVal(); + String uuid = cmd.getId(); + boolean listOnlyRemoved = cmd.isListOnlyRemoved(); + String keyword = cmd.getKeyword(); - logger.debug(String.format("Listing quota tariffs for parameters [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(cmd, "effectiveDate", - "endDate", "listAll", "name", "page", 
"pageSize", "usageType"))); + logger.debug("Listing quota tariffs for parameters [{}].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(cmd, "effectiveDate", + "endDate", "listAll", "name", "page", "pageSize", "usageType", "uuid", "listOnlyRemoved", "keyword")); - return _quotaTariffDao.listQuotaTariffs(startDate, endDate, usageType, name, null, listAll, startIndex, pageSize); + return _quotaTariffDao.listQuotaTariffs(startDate, endDate, usageType, name, uuid, listAll, listOnlyRemoved, startIndex, pageSize, keyword); } @Override @@ -399,6 +430,7 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { String description = cmd.getDescription(); String activationRule = cmd.getActivationRule(); Date now = new Date(); + Integer position = cmd.getPosition(); warnQuotaTariffUpdateDeprecatedFields(cmd); @@ -413,7 +445,7 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { currentQuotaTariff.setRemoved(now); QuotaTariffVO newQuotaTariff = persistNewQuotaTariff(currentQuotaTariff, name, 0, currentQuotaTariffStartDate, cmd.getEntityOwnerId(), endDate, value, description, - activationRule); + activationRule, position); _quotaTariffDao.updateQuotaTariff(currentQuotaTariff); CallContext.current().setEventResourceId(newQuotaTariff.getId()); @@ -434,7 +466,7 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { } protected QuotaTariffVO persistNewQuotaTariff(QuotaTariffVO currentQuotaTariff, String name, int usageType, Date startDate, Long entityOwnerId, Date endDate, Double value, - String description, String activationRule) { + String description, String activationRule, Integer position) { QuotaTariffVO newQuotaTariff = getNewQuotaTariffObject(currentQuotaTariff, name, usageType); @@ -446,6 +478,7 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { validateValueOnCreatingNewQuotaTariff(newQuotaTariff, value); validateStringsOnCreatingNewQuotaTariff(newQuotaTariff::setDescription, 
description); validateStringsOnCreatingNewQuotaTariff(newQuotaTariff::setActivationRule, activationRule); + validatePositionOnCreatingNewQuotaTariff(newQuotaTariff, position); _quotaTariffDao.addQuotaTariff(newQuotaTariff); return newQuotaTariff; @@ -466,6 +499,13 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { return newQuotaTariff; } + protected void validatePositionOnCreatingNewQuotaTariff(QuotaTariffVO newQuotaTariff, Integer position) { + if (position != null) { + newQuotaTariff.setPosition(position); + } + } + + protected void validateStringsOnCreatingNewQuotaTariff(Consumer method, String value){ if (value != null) { method.accept(value.isBlank() ? null : value); @@ -648,6 +688,7 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { Double value = cmd.getValue(); String description = cmd.getDescription(); String activationRule = cmd.getActivationRule(); + Integer position = ObjectUtils.defaultIfNull(cmd.getPosition(), 1); QuotaTariffVO currentQuotaTariff = _quotaTariffDao.findByName(name); @@ -660,7 +701,7 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { "Please, inform a date in the future or do not pass the parameter to use the current date and time.", startDate)); } - QuotaTariffVO newQuotaTariff = persistNewQuotaTariff(null, name, usageType, startDate, cmd.getEntityOwnerId(), endDate, value, description, activationRule); + QuotaTariffVO newQuotaTariff = persistNewQuotaTariff(null, name, usageType, startDate, cmd.getEntityOwnerId(), endDate, value, description, activationRule, position); CallContext.current().setEventResourceId(newQuotaTariff.getId()); @@ -680,6 +721,119 @@ public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { return _quotaTariffDao.updateQuotaTariff(quotaTariff); } + @Override + public List listQuotaPresetVariables(QuotaPresetVariablesListCmd cmd) { + List response; + List> variables = new ArrayList<>(); + + QuotaTypes quotaType = 
cmd.getQuotaType(); + addAllPresetVariables(PresetVariables.class, quotaType, variables, null); + response = createQuotaPresetVariablesResponse(variables); + + return response; + } + + /** + * Adds all preset variables for the given quota type. It recursively finds all presets variables for the given {@link Class} and puts it in a {@link List}. Each item in the + * list is a {@link Pair} that consists of the variable name and its description. + * + * @param clazz used to find the non-transient fields. If it is equal to the {@link Value} class, then it only gets the declared fields, otherwise, it gets all fields, + * including its parent's fields. + * @param quotaType used to check if the field supports the quota resource type. It uses the annotation method {@link PresetVariableDefinition#supportedTypes()} for this + * verification. + * @param variables the {@link List} which contains the {@link Pair} of the preset variable and its description. + * @param recursiveVariableName {@link String} used for recursively building the preset variable string. + */ + public void addAllPresetVariables(Class clazz, QuotaTypes quotaType, List> variables, String recursiveVariableName) { + Field[] allFields = Value.class.equals(clazz) ? 
clazz.getDeclaredFields() : FieldUtils.getAllFields(clazz); + List fieldsNonTransients = Arrays.stream(allFields).filter(field -> !Modifier.isTransient(field.getModifiers())).collect(Collectors.toList()); + for (Field field : fieldsNonTransients) { + PresetVariableDefinition presetVariableDefinitionAnnotation = field.getAnnotation(PresetVariableDefinition.class); + Class fieldClass = getClassOfField(field); + String presetVariableName = field.getName(); + + if (presetVariableDefinitionAnnotation == null) { + continue; + } + + if (StringUtils.isNotEmpty(recursiveVariableName)) { + presetVariableName = String.format("%s.%s", recursiveVariableName, field.getName()); + } + filterSupportedTypes(variables, quotaType, presetVariableDefinitionAnnotation, fieldClass, presetVariableName); + } + } + + /** + * Returns the class of the {@link Field} depending on its type. This method is required for retrieving the Class of Generic Types, i.e. {@link List}. + */ + protected Class getClassOfField(Field field){ + if (field.getGenericType() instanceof ParameterizedType) { + ParameterizedType genericType = (ParameterizedType) field.getGenericType(); + return (Class) genericType.getActualTypeArguments()[0]; + } + + return field.getType(); + } + + /** + * Checks if the {@link PresetVariableDefinition} supports the given {@link QuotaTypes}. If it supports it, it adds the preset variable to the {@link List} recursively + * if it is from the one of the classes in the {@link QuotaResponseBuilderImpl#assignableClasses} array or directly if not. + * + * @param variables {@link List} of the {@link Pair} of the preset variable and its description. + * @param quotaType the given {@link QuotaTypes} to filter. + * @param presetVariableDefinitionAnnotation used to check if the quotaType is supported. + * @param fieldClass class of the field used to verify if it is from the {@link GenericPresetVariable} or {@link ComputingResources} classes. 
If it is, then it calls + * {@link QuotaResponseBuilderImpl#addAllPresetVariables(Class, QuotaTypes, List, String)} to add the preset variable. Otherwise, the {@link Pair} is + * added directly to the variables {@link List}. + * @param presetVariableName {@link String} that contains the recursive created preset variable name. + */ + public void filterSupportedTypes(List> variables, QuotaTypes quotaType, PresetVariableDefinition presetVariableDefinitionAnnotation, Class fieldClass, + String presetVariableName) { + if (Arrays.stream(presetVariableDefinitionAnnotation.supportedTypes()).noneMatch(supportedType -> + supportedType == quotaType.getQuotaType() || supportedType == 0)) { + return; + } + + String presetVariableDescription = presetVariableDefinitionAnnotation.description(); + + Pair pair = new Pair<>(presetVariableName, presetVariableDescription); + variables.add(pair); + + if (isRecursivePresetVariable(fieldClass)) { + addAllPresetVariables(fieldClass, quotaType, variables, presetVariableName); + } + } + + /** + * Returns true if the {@link Class} of the {@link Field} is from one of the classes in the array {@link QuotaResponseBuilderImpl#assignableClasses}, i.e., it is a recursive + * {@link PresetVariables}, returns false otherwise. 
+ */ + private boolean isRecursivePresetVariable(Class fieldClass) { + for (Class clazz : assignableClasses) { + if (clazz.isAssignableFrom(fieldClass)) { + return true; + } + } + return false; + } + + public List createQuotaPresetVariablesResponse(List> variables) { + final List responses = new ArrayList<>(); + + for (Pair variable : variables) { + responses.add(createPresetVariablesItemResponse(variable)); + } + + return responses; + } + + public QuotaPresetVariablesItemResponse createPresetVariablesItemResponse(Pair variable) { + QuotaPresetVariablesItemResponse response = new QuotaPresetVariablesItemResponse(); + response.setVariable(variable.first()); + response.setDescription(variable.second()); + return response; + } + @Override public Pair configureQuotaEmail(QuotaConfigureEmailCmd cmd) { validateQuotaConfigureEmailCmdParameters(cmd); diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaTariffResponse.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaTariffResponse.java index cec3634c76d..6d844d78427 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaTariffResponse.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaTariffResponse.java @@ -83,6 +83,11 @@ public class QuotaTariffResponse extends BaseResponse { @Param(description = "when the quota tariff was removed") private Date removed; + @SerializedName("position") + @Param(description = "position in the execution sequence for tariffs of the same type") + private Integer position; + + public QuotaTariffResponse() { super(); this.setObjectName("quotatariff"); @@ -172,4 +177,12 @@ public class QuotaTariffResponse extends BaseResponse { this.removed = removed; } + public Integer getPosition() { + return position; + } + + public void setPosition(Integer position) { + this.position = position; + } + } diff --git 
a/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java index 4bc41233096..17fa7bd8425 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.api.command.QuotaEmailTemplateListCmd; import org.apache.cloudstack.api.command.QuotaEmailTemplateUpdateCmd; import org.apache.cloudstack.api.command.QuotaEnabledCmd; import org.apache.cloudstack.api.command.QuotaListEmailConfigurationCmd; +import org.apache.cloudstack.api.command.QuotaPresetVariablesListCmd; import org.apache.cloudstack.api.command.QuotaStatementCmd; import org.apache.cloudstack.api.command.QuotaSummaryCmd; import org.apache.cloudstack.api.command.QuotaTariffCreateCmd; @@ -119,6 +120,7 @@ public class QuotaServiceImpl extends ManagerBase implements QuotaService, Confi cmdList.add(QuotaTariffDeleteCmd.class); cmdList.add(QuotaConfigureEmailCmd.class); cmdList.add(QuotaListEmailConfigurationCmd.class); + cmdList.add(QuotaPresetVariablesListCmd.class); return cmdList; } diff --git a/plugins/database/quota/src/test/java/org/apache/cloudstack/api/command/QuotaTariffListCmdTest.java b/plugins/database/quota/src/test/java/org/apache/cloudstack/api/command/QuotaTariffListCmdTest.java index f5ce92ae014..a98d3d611de 100644 --- a/plugins/database/quota/src/test/java/org/apache/cloudstack/api/command/QuotaTariffListCmdTest.java +++ b/plugins/database/quota/src/test/java/org/apache/cloudstack/api/command/QuotaTariffListCmdTest.java @@ -16,15 +16,18 @@ // under the License. 
package org.apache.cloudstack.api.command; +import com.cloud.user.User; import junit.framework.TestCase; import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.api.response.QuotaTariffResponse; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.quota.constant.QuotaTypes; import org.apache.cloudstack.quota.vo.QuotaTariffVO; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; +import org.mockito.MockedStatic; import org.mockito.junit.MockitoJUnitRunner; import java.lang.reflect.Field; @@ -40,6 +43,12 @@ public class QuotaTariffListCmdTest extends TestCase { @Mock QuotaResponseBuilder responseBuilder; + @Mock + User userMock; + + @Mock + CallContext callContextMock; + @Test public void testQuotaTariffListCmd() throws NoSuchFieldException, IllegalAccessException { QuotaTariffListCmd cmd = new QuotaTariffListCmd(); @@ -48,17 +57,24 @@ public class QuotaTariffListCmdTest extends TestCase { rbField.setAccessible(true); rbField.set(cmd, responseBuilder); - List quotaTariffVOList = new ArrayList(); + List quotaTariffVOList = new ArrayList<>(); QuotaTariffVO tariff = new QuotaTariffVO(); tariff.setEffectiveOn(new Date()); tariff.setCurrencyValue(new BigDecimal(100)); tariff.setUsageType(QuotaTypes.VOLUME); quotaTariffVOList.add(new QuotaTariffVO()); - Mockito.when(responseBuilder.listQuotaTariffPlans(Mockito.eq(cmd))).thenReturn(new Pair<>(quotaTariffVOList, quotaTariffVOList.size())); - Mockito.when(responseBuilder.createQuotaTariffResponse(Mockito.any(QuotaTariffVO.class))).thenReturn(new QuotaTariffResponse()); - cmd.execute(); - Mockito.verify(responseBuilder, Mockito.times(1)).createQuotaTariffResponse(Mockito.any(QuotaTariffVO.class)); + try (MockedStatic callContextStaticMock = Mockito.mockStatic(CallContext.class)) { + Mockito.when(responseBuilder.listQuotaTariffPlans(Mockito.eq(cmd))).thenReturn(new Pair<>(quotaTariffVOList, 
quotaTariffVOList.size())); + callContextStaticMock.when(CallContext::current).thenReturn(callContextMock); + Mockito.when(callContextMock.getCallingUser()).thenReturn(userMock); + Mockito.when(responseBuilder.isUserAllowedToSeeActivationRules(userMock)).thenReturn(true); + Mockito.when(responseBuilder.createQuotaTariffResponse(Mockito.any(QuotaTariffVO.class), Mockito.eq(true))).thenReturn(new QuotaTariffResponse()); + + cmd.execute(); + } + + Mockito.verify(responseBuilder, Mockito.times(1)).createQuotaTariffResponse(Mockito.any(QuotaTariffVO.class), Mockito.eq(true)); } } diff --git a/plugins/database/quota/src/test/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmdTest.java b/plugins/database/quota/src/test/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmdTest.java index 22d78d6794e..7a4d1a75356 100644 --- a/plugins/database/quota/src/test/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmdTest.java +++ b/plugins/database/quota/src/test/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmdTest.java @@ -60,8 +60,8 @@ public class QuotaTariffUpdateCmdTest extends TestCase { } Mockito.when(responseBuilder.updateQuotaTariffPlan(Mockito.eq(cmd))).thenReturn(tariff); - Mockito.when(responseBuilder.createQuotaTariffResponse(Mockito.eq(tariff))).thenReturn(new QuotaTariffResponse()); + Mockito.when(responseBuilder.createQuotaTariffResponse(Mockito.eq(tariff), Mockito.eq(true))).thenReturn(new QuotaTariffResponse()); cmd.execute(); - Mockito.verify(responseBuilder, Mockito.times(1)).createQuotaTariffResponse(Mockito.eq(tariff)); + Mockito.verify(responseBuilder, Mockito.times(1)).createQuotaTariffResponse(Mockito.eq(tariff), Mockito.eq(true)); } } diff --git a/plugins/database/quota/src/test/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImplTest.java b/plugins/database/quota/src/test/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImplTest.java index 664863a1b90..fd359525893 100644 --- 
a/plugins/database/quota/src/test/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImplTest.java +++ b/plugins/database/quota/src/test/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImplTest.java @@ -29,6 +29,7 @@ import java.util.function.Consumer; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; +import com.cloud.utils.Pair; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.QuotaConfigureEmailCmd; import org.apache.cloudstack.api.command.QuotaEmailTemplateListCmd; @@ -36,6 +37,9 @@ import org.apache.cloudstack.api.command.QuotaEmailTemplateUpdateCmd; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.quota.QuotaService; import org.apache.cloudstack.quota.QuotaStatement; +import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariableDefinition; +import org.apache.cloudstack.quota.activationrule.presetvariables.PresetVariables; +import org.apache.cloudstack.quota.activationrule.presetvariables.Value; import org.apache.cloudstack.quota.constant.QuotaConfig; import org.apache.cloudstack.quota.constant.QuotaTypes; import org.apache.cloudstack.quota.dao.QuotaAccountDao; @@ -51,7 +55,10 @@ import org.apache.cloudstack.quota.vo.QuotaCreditsVO; import org.apache.cloudstack.quota.vo.QuotaEmailConfigurationVO; import org.apache.cloudstack.quota.vo.QuotaEmailTemplatesVO; import org.apache.cloudstack.quota.vo.QuotaTariffVO; +import org.apache.cloudstack.discovery.ApiDiscoveryService; + import org.apache.commons.lang3.time.DateUtils; + import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -65,6 +72,7 @@ import com.cloud.user.Account; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; +import com.cloud.user.User; import junit.framework.TestCase; import org.mockito.junit.MockitoJUnitRunner; @@ -87,6 +95,12 @@ public class QuotaResponseBuilderImplTest 
extends TestCase { @Mock UserDao userDaoMock; + @Mock + User userMock; + + @Mock + ApiDiscoveryService discoveryServiceMock; + @Mock QuotaService quotaServiceMock; @@ -160,11 +174,29 @@ public class QuotaResponseBuilderImplTest extends TestCase { @Test public void testQuotaResponse() { QuotaTariffVO tariffVO = makeTariffTestData(); - QuotaTariffResponse response = quotaResponseBuilderSpy.createQuotaTariffResponse(tariffVO); + QuotaTariffResponse response = quotaResponseBuilderSpy.createQuotaTariffResponse(tariffVO, true); assertTrue(tariffVO.getUsageType() == response.getUsageType()); assertTrue(tariffVO.getCurrencyValue().equals(response.getTariffValue())); } + @Test + public void createQuotaTariffResponseTestIfReturnsActivationRuleWithPermission() { + QuotaTariffVO tariff = makeTariffTestData(); + tariff.setActivationRule("x === 10"); + + QuotaTariffResponse tariffResponse = quotaResponseBuilderSpy.createQuotaTariffResponse(tariff, true); + assertEquals("x === 10", tariffResponse.getActivationRule()); + } + + @Test + public void createQuotaTariffResponseTestIfReturnsActivationRuleWithoutPermission() { + QuotaTariffVO tariff = makeTariffTestData(); + tariff.setActivationRule("x === 10"); + + QuotaTariffResponse tariffResponse = quotaResponseBuilderSpy.createQuotaTariffResponse(tariff, false); + assertNull(tariffResponse.getActivationRule()); + } + @Test public void testAddQuotaCredits() { final long accountId = 2L; @@ -330,7 +362,7 @@ public class QuotaResponseBuilderImplTest extends TestCase { @Test public void validateEndDateOnCreatingNewQuotaTariffTestSetValidEndDate() { Date startDate = DateUtils.addDays(date, -100); - Date endDate = DateUtils.addMilliseconds(new Date(), 1); + Date endDate = DateUtils.addMinutes(new Date(), 1); quotaResponseBuilderSpy.validateEndDateOnCreatingNewQuotaTariff(quotaTariffVoMock, startDate, endDate); Mockito.verify(quotaTariffVoMock).setEndDate(Mockito.any(Date.class)); @@ -368,8 +400,10 @@ public class 
QuotaResponseBuilderImplTest extends TestCase { Mockito.doNothing().when(quotaResponseBuilderSpy).validateValueOnCreatingNewQuotaTariff(Mockito.any(QuotaTariffVO.class), Mockito.anyDouble()); Mockito.doNothing().when(quotaResponseBuilderSpy).validateStringsOnCreatingNewQuotaTariff(Mockito.any(Consumer.class), Mockito.anyString()); Mockito.doReturn(quotaTariffVoMock).when(quotaTariffDaoMock).addQuotaTariff(Mockito.any(QuotaTariffVO.class)); + Mockito.doNothing().when(quotaResponseBuilderSpy).validatePositionOnCreatingNewQuotaTariff(Mockito.any(QuotaTariffVO.class), Mockito.anyInt()); - quotaResponseBuilderSpy.persistNewQuotaTariff(quotaTariffVoMock, "", 1, date, 1l, date, 1.0, "", ""); + + quotaResponseBuilderSpy.persistNewQuotaTariff(quotaTariffVoMock, "", 1, date, 1l, date, 1.0, "", "", 2); Mockito.verify(quotaTariffDaoMock).addQuotaTariff(Mockito.any(QuotaTariffVO.class)); } @@ -419,6 +453,46 @@ public class QuotaResponseBuilderImplTest extends TestCase { assertTrue(quotaSummaryResponse.getQuotaEnabled()); } + @Test + public void filterSupportedTypesTestReturnWhenQuotaTypeDoesNotMatch() throws NoSuchFieldException { + List> variables = new ArrayList<>(); + Class clazz = Value.class; + PresetVariableDefinition presetVariableDefinitionAnnotation = clazz.getDeclaredField("host").getAnnotation(PresetVariableDefinition.class); + QuotaTypes quotaType = QuotaTypes.getQuotaType(QuotaTypes.NETWORK_OFFERING); + int expectedVariablesSize = 0; + + quotaResponseBuilderSpy.filterSupportedTypes(variables, quotaType, presetVariableDefinitionAnnotation, clazz, null); + + assertEquals(expectedVariablesSize, variables.size()); + } + + @Test + public void filterSupportedTypesTestAddPresetVariableWhenClassIsNotInstanceOfGenericPresetVariableAndComputingResource() throws NoSuchFieldException { + List> variables = new ArrayList<>(); + Class clazz = PresetVariables.class; + PresetVariableDefinition presetVariableDefinitionAnnotation = 
clazz.getDeclaredField("resourceType").getAnnotation(PresetVariableDefinition.class); + QuotaTypes quotaType = QuotaTypes.getQuotaType(QuotaTypes.NETWORK_OFFERING); + int expectedVariablesSize = 1; + String expectedVariableName = "variable.name"; + + quotaResponseBuilderSpy.filterSupportedTypes(variables, quotaType, presetVariableDefinitionAnnotation, clazz, "variable.name"); + + assertEquals(expectedVariablesSize, variables.size()); + assertEquals(expectedVariableName, variables.get(0).first()); + } + + @Test + public void filterSupportedTypesTestCallRecursiveMethodWhenIsGenericPresetVariableClassOrComputingResourceClass() throws NoSuchFieldException { + List> variables = new ArrayList<>(); + Class clazz = Value.class; + PresetVariableDefinition presetVariableDefinitionAnnotation = clazz.getDeclaredField("storage").getAnnotation(PresetVariableDefinition.class); + QuotaTypes quotaType = QuotaTypes.getQuotaType(QuotaTypes.VOLUME); + + quotaResponseBuilderSpy.filterSupportedTypes(variables, quotaType, presetVariableDefinitionAnnotation, clazz, "variable.name"); + + Mockito.verify(quotaResponseBuilderSpy, Mockito.atLeastOnce()).addAllPresetVariables(Mockito.any(), Mockito.any(QuotaTypes.class), Mockito.anyList(), + Mockito.anyString()); + } @Test (expected = InvalidParameterValueException.class) public void validateQuotaConfigureEmailCmdParametersTestNullQuotaAccount() { @@ -442,7 +516,6 @@ public class QuotaResponseBuilderImplTest extends TestCase { quotaResponseBuilderSpy.validateQuotaConfigureEmailCmdParameters(quotaConfigureEmailCmdMock); } - @Test public void validateQuotaConfigureEmailCmdParametersTestNullTemplateName() { Mockito.doReturn(quotaAccountVOMock).when(quotaAccountDaoMock).findByIdQuotaAccount(Mockito.any()); @@ -510,4 +583,66 @@ public class QuotaResponseBuilderImplTest extends TestCase { assertEquals(2, result.getEmailTemplateId()); assertFalse(result.isEnabled()); } + + @Test + public void 
validatePositionOnCreatingNewQuotaTariffTestNullValueDoNothing() { + quotaResponseBuilderSpy.validatePositionOnCreatingNewQuotaTariff(quotaTariffVoMock, null); + Mockito.verify(quotaTariffVoMock, Mockito.never()).setPosition(Mockito.any()); + } + + @Test + public void validatePositionOnCreatingNewQuotaTariffTestAnyValueIsSet() { + Integer position = 1; + quotaResponseBuilderSpy.validatePositionOnCreatingNewQuotaTariff(quotaTariffVoMock, position); + Mockito.verify(quotaTariffVoMock).setPosition(position); + } + + + @Test + public void isUserAllowedToSeeActivationRulesTestWithPermissionToCreateTariff() { + ApiDiscoveryResponse response = new ApiDiscoveryResponse(); + response.setName("quotaTariffCreate"); + + List cmdList = new ArrayList<>(); + cmdList.add(response); + + ListResponse responseList = new ListResponse<>(); + responseList.setResponses(cmdList); + + Mockito.doReturn(responseList).when(discoveryServiceMock).listApis(userMock, null); + + assertTrue(quotaResponseBuilderSpy.isUserAllowedToSeeActivationRules(userMock)); + } + + @Test + public void isUserAllowedToSeeActivationRulesTestWithPermissionToUpdateTariff() { + ApiDiscoveryResponse response = new ApiDiscoveryResponse(); + response.setName("quotaTariffUpdate"); + + List cmdList = new ArrayList<>(); + cmdList.add(response); + + ListResponse responseList = new ListResponse<>(); + responseList.setResponses(cmdList); + + Mockito.doReturn(responseList).when(discoveryServiceMock).listApis(userMock, null); + + assertTrue(quotaResponseBuilderSpy.isUserAllowedToSeeActivationRules(userMock)); + } + + @Test + public void isUserAllowedToSeeActivationRulesTestWithNoPermission() { + ApiDiscoveryResponse response = new ApiDiscoveryResponse(); + response.setName("testCmd"); + + List cmdList = new ArrayList<>(); + cmdList.add(response); + + ListResponse responseList = new ListResponse<>(); + responseList.setResponses(cmdList); + + Mockito.doReturn(responseList).when(discoveryServiceMock).listApis(userMock, null); + + 
assertFalse(quotaResponseBuilderSpy.isUserAllowedToSeeActivationRules(userMock)); + } } diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index 9060eccb64a..4f1db396b7c 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -23,6 +23,7 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; @@ -236,7 +237,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @Override public List doInTransaction(TransactionStatus status) { // find or create the affinity group by name under this account/domain - AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); + AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal, DedicatedResources.Type.Zone); if (group == null) { logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); throw new CloudRuntimeException("Failed to dedicate zone. 
Please contact Cloud Support."); @@ -372,10 +373,10 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @Override public List doInTransaction(TransactionStatus status) { // find or create the affinity group by name under this account/domain - AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); + AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal, DedicatedResources.Type.Pod); if (group == null) { - logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); - throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + logger.error("Unable to dedicate pod due to, failed to create dedication affinity group"); + throw new CloudRuntimeException("Failed to dedicate pod. Please contact Cloud Support."); } DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, podId, null, null, null, null, group.getId()); try { @@ -485,10 +486,10 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @Override public List doInTransaction(TransactionStatus status) { // find or create the affinity group by name under this account/domain - AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); + AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal, DedicatedResources.Type.Cluster); if (group == null) { - logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); - throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + logger.error("Unable to dedicate cluster due to, failed to create dedication affinity group"); + throw new CloudRuntimeException("Failed to dedicate cluster. 
Please contact Cloud Support."); } DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, clusterId, null, null, null, group.getId()); try { @@ -582,10 +583,10 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @Override public List doInTransaction(TransactionStatus status) { // find or create the affinity group by name under this account/domain - AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); + AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal, DedicatedResources.Type.Host); if (group == null) { - logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); - throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + logger.error("Unable to dedicate host due to, failed to create dedication affinity group"); + throw new CloudRuntimeException("Failed to dedicate host. Please contact Cloud Support."); } DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, null, hostId, null, null, group.getId()); try { @@ -607,7 +608,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { } - private AffinityGroup findOrCreateDedicatedAffinityGroup(Long domainId, Long accountId) { + private AffinityGroup findOrCreateDedicatedAffinityGroup(Long domainId, Long accountId, DedicatedResources.Type dedicatedResource) { if (domainId == null) { return null; } @@ -624,24 +625,25 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (group != null) { return group; } - // default to a groupname with account/domain information - affinityGroupName = "DedicatedGrp-" + accountName; + // defaults to a groupName with resourceType and account/domain information + affinityGroupName = String.format("Dedicated%sGrp-%s", dedicatedResource, accountName); } else { // domain level group group = _affinityGroupDao.findDomainLevelGroupByType(domainId, 
"ExplicitDedication"); if (group != null) { return group; } - // default to a groupname with account/domain information + + // defaults to a groupName with resourceType and account/domain information String domainName = _domainDao.findById(domainId).getName(); - affinityGroupName = "DedicatedGrp-domain-" + domainName; + affinityGroupName = String.format("Dedicated%sGrp-domain-%s", dedicatedResource, domainName); } - group = _affinityGroupService.createAffinityGroup(accountName, null, domainId, affinityGroupName, "ExplicitDedication", "dedicated resources group"); + String description = String.format("Dedicated %s group", StringUtils.lowerCase(dedicatedResource.toString())); + group = _affinityGroupService.createAffinityGroup(accountName, null, domainId, affinityGroupName, "ExplicitDedication", description); return group; - } private List getVmsOnHost(long hostId) { diff --git a/plugins/dedicated-resources/src/main/resources/META-INF/cloudstack/core/spring-dedicated-resources-core-context.xml b/plugins/dedicated-resources/src/main/resources/META-INF/cloudstack/core/spring-dedicated-resources-core-context.xml index c35031d9c39..2680d8a9067 100644 --- a/plugins/dedicated-resources/src/main/resources/META-INF/cloudstack/core/spring-dedicated-resources-core-context.xml +++ b/plugins/dedicated-resources/src/main/resources/META-INF/cloudstack/core/spring-dedicated-resources-core-context.xml @@ -29,5 +29,5 @@ - + diff --git a/plugins/deployment-planners/implicit-dedication/src/main/resources/META-INF/cloudstack/implicit-dedication/spring-implicit-dedication-context.xml b/plugins/deployment-planners/implicit-dedication/src/main/resources/META-INF/cloudstack/implicit-dedication/spring-implicit-dedication-context.xml index 3f1a715bfc5..01d18a09fa4 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/resources/META-INF/cloudstack/implicit-dedication/spring-implicit-dedication-context.xml +++ 
b/plugins/deployment-planners/implicit-dedication/src/main/resources/META-INF/cloudstack/implicit-dedication/spring-implicit-dedication-context.xml @@ -1,12 +1,12 @@ - - + diff --git a/plugins/event-bus/inmemory/src/main/java/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java b/plugins/event-bus/inmemory/src/main/java/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java index d5d36278192..0c00c0639fd 100644 --- a/plugins/event-bus/inmemory/src/main/java/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java +++ b/plugins/event-bus/inmemory/src/main/java/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java @@ -60,6 +60,8 @@ public class InMemoryEventBus extends ManagerBase implements EventBus { if (subscriber == null || topic == null) { throw new EventBusException("Invalid EventSubscriber/EventTopic object passed."); } + logger.debug("subscribing '{}' to events of type '{}' from '{}'", subscriber.toString(), topic.getEventType(), topic.getEventSource()); + UUID subscriberId = UUID.randomUUID(); subscribers.put(subscriberId, new Pair(topic, subscriber)); @@ -68,6 +70,7 @@ public class InMemoryEventBus extends ManagerBase implements EventBus { @Override public void unsubscribe(UUID subscriberId, EventSubscriber subscriber) throws EventBusException { + logger.debug("unsubscribing '{}'", subscriberId); if (subscriberId == null) { throw new EventBusException("Cannot unregister a null subscriberId."); } @@ -85,7 +88,9 @@ public class InMemoryEventBus extends ManagerBase implements EventBus { @Override public void publish(Event event) throws EventBusException { + logger.trace("publish '{}'", event.getDescription()); if (subscribers == null || subscribers.isEmpty()) { + logger.trace("no subscribers, no publish"); return; // no subscriber to publish to, so just return } diff --git a/plugins/event-bus/kafka/src/main/java/org/apache/cloudstack/mom/kafka/KafkaEventBus.java b/plugins/event-bus/kafka/src/main/java/org/apache/cloudstack/mom/kafka/KafkaEventBus.java 
index 01888779fc6..f2589d2d7d0 100644 --- a/plugins/event-bus/kafka/src/main/java/org/apache/cloudstack/mom/kafka/KafkaEventBus.java +++ b/plugins/event-bus/kafka/src/main/java/org/apache/cloudstack/mom/kafka/KafkaEventBus.java @@ -87,19 +87,23 @@ public class KafkaEventBus extends ManagerBase implements EventBus { @Override public UUID subscribe(EventTopic topic, EventSubscriber subscriber) throws EventBusException { + logger.debug("subscribing '{}' to events of type '{}' from '{}'", subscriber.toString(), topic.getEventType(), topic.getEventSource()); + /* NOOP */ return UUID.randomUUID(); } @Override public void unsubscribe(UUID subscriberId, EventSubscriber subscriber) throws EventBusException { + logger.debug("unsubscribing '{}'", subscriberId); /* NOOP */ } @Override public void publish(Event event) throws EventBusException { - ProducerRecord record = new ProducerRecord(_topic, event.getResourceUUID(), event.getDescription()); - _producer.send(record); + logger.trace("publish '{}'", event.getDescription()); + ProducerRecord newRecord = new ProducerRecord<>(_topic, event.getResourceUUID(), event.getDescription()); + _producer.send(newRecord); } @Override diff --git a/plugins/event-bus/rabbitmq/src/main/java/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java b/plugins/event-bus/rabbitmq/src/main/java/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java index 8cd2289f9f3..e8067e75b40 100644 --- a/plugins/event-bus/rabbitmq/src/main/java/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java +++ b/plugins/event-bus/rabbitmq/src/main/java/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java @@ -185,11 +185,12 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { */ @Override public UUID subscribe(EventTopic topic, EventSubscriber subscriber) throws EventBusException { - if (subscriber == null || topic == null) { throw new EventBusException("Invalid EventSubscriber/EventTopic object passed."); } + logger.debug("subscribing '{}' 
to events of type '{}' from '{}'", subscriber.toString(), topic.getEventType(), topic.getEventSource()); + // create a UUID, that will be used for managing subscriptions and also used as queue name // for on the queue used for the subscriber on the AMQP broker UUID queueId = UUID.randomUUID(); @@ -250,6 +251,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { @Override public void unsubscribe(UUID subscriberId, EventSubscriber subscriber) throws EventBusException { + logger.debug("unsubscribing '{}'", subscriberId); try { String classname = subscriber.getClass().getName(); String queueName = UUID.nameUUIDFromBytes(classname.getBytes()).toString(); @@ -265,6 +267,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { // publish event on to the exchange created on AMQP server @Override public void publish(Event event) throws EventBusException { + logger.trace("publish '{}'", event.getDescription()); String routingKey = createRoutingKey(event); String eventDescription = event.getDescription(); diff --git a/plugins/event-bus/webhook/pom.xml b/plugins/event-bus/webhook/pom.xml new file mode 100644 index 00000000000..278f4dc0ec5 --- /dev/null +++ b/plugins/event-bus/webhook/pom.xml @@ -0,0 +1,46 @@ + + + 4.0.0 + cloud-mom-webhook + Apache CloudStack Plugin - Webhook Event Bus + + org.apache.cloudstack + cloudstack-plugins + 4.20.0.0-SNAPSHOT + ../../pom.xml + + + + org.apache.cloudstack + cloud-framework-events + ${project.version} + + + org.apache.cloudstack + cloud-engine-api + ${project.version} + + + org.json + json + + + diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/Webhook.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/Webhook.java new file mode 100644 index 00000000000..1cc73ae31df --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/Webhook.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software 
Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook; + +import java.util.Date; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +public interface Webhook extends ControlledEntity, Identity, InternalIdentity { + public static final long ID_DUMMY = 0L; + public static final String NAME_DUMMY = "Test"; + enum State { + Enabled, Disabled; + }; + + enum Scope { + Local, Domain, Global; + }; + + long getId(); + String getName(); + String getDescription(); + State getState(); + long getDomainId(); + long getAccountId(); + String getPayloadUrl(); + String getSecretKey(); + boolean isSslVerification(); + Scope getScope(); + Date getCreated(); +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookApiService.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookApiService.java new file mode 100644 index 00000000000..edd77e5b414 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookApiService.java @@ -0,0 +1,44 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor 
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook; + +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.mom.webhook.api.command.user.CreateWebhookCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.DeleteWebhookCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.DeleteWebhookDeliveryCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.ExecuteWebhookDeliveryCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.ListWebhookDeliveriesCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.ListWebhooksCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.UpdateWebhookCmd; +import org.apache.cloudstack.mom.webhook.api.response.WebhookDeliveryResponse; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; + +import com.cloud.utils.component.PluggableService; +import com.cloud.utils.exception.CloudRuntimeException; + +public interface WebhookApiService extends PluggableService { + + ListResponse listWebhooks(ListWebhooksCmd cmd); + WebhookResponse createWebhook(CreateWebhookCmd cmd) throws CloudRuntimeException; + boolean deleteWebhook(DeleteWebhookCmd cmd) throws CloudRuntimeException; + WebhookResponse updateWebhook(UpdateWebhookCmd cmd) throws 
CloudRuntimeException; + WebhookResponse createWebhookResponse(long webhookId); + ListResponse listWebhookDeliveries(ListWebhookDeliveriesCmd cmd); + int deleteWebhookDelivery(DeleteWebhookDeliveryCmd cmd) throws CloudRuntimeException; + WebhookDeliveryResponse executeWebhookDelivery(ExecuteWebhookDeliveryCmd cmd) throws CloudRuntimeException; +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookApiServiceImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookApiServiceImpl.java new file mode 100644 index 00000000000..187b140d5d8 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookApiServiceImpl.java @@ -0,0 +1,574 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook; + +import java.net.URI; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.api.command.user.CreateWebhookCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.DeleteWebhookCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.DeleteWebhookDeliveryCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.ExecuteWebhookDeliveryCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.ListWebhookDeliveriesCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.ListWebhooksCmd; +import org.apache.cloudstack.mom.webhook.api.command.user.UpdateWebhookCmd; +import org.apache.cloudstack.mom.webhook.api.response.WebhookDeliveryResponse; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; +import org.apache.cloudstack.mom.webhook.dao.WebhookDao; +import org.apache.cloudstack.mom.webhook.dao.WebhookDeliveryDao; +import org.apache.cloudstack.mom.webhook.dao.WebhookDeliveryJoinDao; +import org.apache.cloudstack.mom.webhook.dao.WebhookJoinDao; +import org.apache.cloudstack.mom.webhook.vo.WebhookDeliveryJoinVO; +import org.apache.cloudstack.mom.webhook.vo.WebhookDeliveryVO; +import org.apache.cloudstack.mom.webhook.vo.WebhookJoinVO; +import org.apache.cloudstack.mom.webhook.vo.WebhookVO; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; + +import com.cloud.api.ApiResponseHelper; +import com.cloud.cluster.ManagementServerHostVO; +import com.cloud.cluster.dao.ManagementServerHostDao; +import com.cloud.domain.Domain; +import com.cloud.domain.dao.DomainDao; +import 
com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.projects.Project; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; +import com.cloud.utils.UriUtils; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.rest.HttpConstants; + +public class WebhookApiServiceImpl extends ManagerBase implements WebhookApiService { + + @Inject + AccountManager accountManager; + @Inject + DomainDao domainDao; + @Inject + WebhookDao webhookDao; + @Inject + WebhookJoinDao webhookJoinDao; + @Inject + WebhookDeliveryDao webhookDeliveryDao; + @Inject + WebhookDeliveryJoinDao webhookDeliveryJoinDao; + @Inject + ManagementServerHostDao managementServerHostDao; + @Inject + WebhookService webhookService; + + protected WebhookResponse createWebhookResponse(WebhookJoinVO webhookVO) { + WebhookResponse response = new WebhookResponse(); + response.setObjectName("webhook"); + response.setId(webhookVO.getUuid()); + response.setName(webhookVO.getName()); + response.setDescription(webhookVO.getDescription()); + ApiResponseHelper.populateOwner(response, webhookVO); + response.setState(webhookVO.getState().toString()); + response.setPayloadUrl(webhookVO.getPayloadUrl()); + response.setSecretKey(webhookVO.getSecretKey()); + response.setSslVerification(webhookVO.isSslVerification()); + response.setScope(webhookVO.getScope().toString()); + response.setCreated(webhookVO.getCreated()); + return response; + } + + protected List getIdsOfAccessibleWebhooks(Account caller) { + if (Account.Type.ADMIN.equals(caller.getType())) { + return new ArrayList<>(); + } + String domainPath = null; + if (Account.Type.DOMAIN_ADMIN.equals(caller.getType())) { + Domain domain = 
domainDao.findById(caller.getDomainId()); + domainPath = domain.getPath(); + } + List webhooks = webhookJoinDao.listByAccountOrDomain(caller.getId(), domainPath); + return webhooks.stream().map(WebhookJoinVO::getId).collect(Collectors.toList()); + } + + protected ManagementServerHostVO basicWebhookDeliveryApiCheck(Account caller, final Long id, final Long webhookId, + final Long managementServerId, final Date startDate, final Date endDate) { + if (id != null) { + WebhookDeliveryVO webhookDeliveryVO = webhookDeliveryDao.findById(id); + if (webhookDeliveryVO == null) { + throw new InvalidParameterValueException("Invalid ID specified"); + } + WebhookVO webhookVO = webhookDao.findById(webhookDeliveryVO.getWebhookId()); + if (webhookVO != null) { + accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, webhookVO); + } + } + if (webhookId != null) { + WebhookVO webhookVO = webhookDao.findById(webhookId); + if (webhookVO == null) { + throw new InvalidParameterValueException("Invalid Webhook specified"); + } + accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, webhookVO); + } + if (endDate != null && startDate != null && endDate.before(startDate)) { + throw new InvalidParameterValueException(String.format("Invalid %s specified", ApiConstants.END_DATE)); + } + ManagementServerHostVO managementServerHostVO = null; + if (managementServerId != null) { + if (!Account.Type.ADMIN.equals(caller.getType())) { + throw new PermissionDeniedException("Invalid parameter specified"); + } + managementServerHostVO = managementServerHostDao.findById(managementServerId); + if (managementServerHostVO == null) { + throw new InvalidParameterValueException("Invalid management server specified"); + } + } + return managementServerHostVO; + } + + protected WebhookDeliveryResponse createWebhookDeliveryResponse(WebhookDeliveryJoinVO webhookDeliveryVO) { + WebhookDeliveryResponse response = new WebhookDeliveryResponse(); + 
response.setObjectName(WebhookDelivery.class.getSimpleName().toLowerCase()); + response.setId(webhookDeliveryVO.getUuid()); + response.setEventId(webhookDeliveryVO.getEventUuid()); + response.setEventType(webhookDeliveryVO.getEventType()); + response.setWebhookId(webhookDeliveryVO.getWebhookUuId()); + response.setWebhookName(webhookDeliveryVO.getWebhookName()); + response.setManagementServerId(webhookDeliveryVO.getManagementServerUuId()); + response.setManagementServerName(webhookDeliveryVO.getManagementServerName()); + response.setHeaders(webhookDeliveryVO.getHeaders()); + response.setPayload(webhookDeliveryVO.getPayload()); + response.setSuccess(webhookDeliveryVO.isSuccess()); + response.setResponse(webhookDeliveryVO.getResponse()); + response.setStartTime(webhookDeliveryVO.getStartTime()); + response.setEndTime(webhookDeliveryVO.getEndTime()); + return response; + } + + protected WebhookDeliveryResponse createTestWebhookDeliveryResponse(WebhookDelivery webhookDelivery, + Webhook webhook) { + WebhookDeliveryResponse response = new WebhookDeliveryResponse(); + response.setObjectName(WebhookDelivery.class.getSimpleName().toLowerCase()); + response.setId(webhookDelivery.getUuid()); + response.setEventType(WebhookDelivery.TEST_EVENT_TYPE); + if (webhook != null) { + response.setWebhookId(webhook.getUuid()); + response.setWebhookName(webhook.getName()); + } + ManagementServerHostVO msHost = + managementServerHostDao.findByMsid(webhookDelivery.getManagementServerId()); + if (msHost != null) { + response.setManagementServerId(msHost.getUuid()); + response.setManagementServerName(msHost.getName()); + } + response.setHeaders(webhookDelivery.getHeaders()); + response.setPayload(webhookDelivery.getPayload()); + response.setSuccess(webhookDelivery.isSuccess()); + response.setResponse(webhookDelivery.getResponse()); + response.setStartTime(webhookDelivery.getStartTime()); + response.setEndTime(webhookDelivery.getEndTime()); + return response; + } + + /** + * @param cmd + * 
@return Account + */ + protected Account getOwner(final CreateWebhookCmd cmd) { + final Account caller = CallContext.current().getCallingAccount(); + return accountManager.finalizeOwner(caller, cmd.getAccountName(), cmd.getDomainId(), cmd.getProjectId()); + } + + protected String getNormalizedPayloadUrl(String payloadUrl) { + if (StringUtils.isBlank(payloadUrl) || payloadUrl.startsWith("http://") || payloadUrl.startsWith("https://")) { + return payloadUrl; + } + return String.format("http://%s", payloadUrl); + } + + protected void validateWebhookOwnerPayloadUrl(Account owner, String payloadUrl, Webhook currentWebhook) { + WebhookVO webhookVO = webhookDao.findByAccountAndPayloadUrl(owner.getId(), payloadUrl); + if (webhookVO == null) { + return; + } + if (currentWebhook != null && webhookVO.getId() == currentWebhook.getId()) { + return; + } + String error = String.format("Payload URL: %s is already in use by another webhook", payloadUrl); + logger.error(String.format("%s: %s for Account [%s]", error, webhookVO, owner)); + throw new InvalidParameterValueException(error); + } + + @Override + public ListResponse listWebhooks(ListWebhooksCmd cmd) { + final CallContext ctx = CallContext.current(); + final Account caller = ctx.getCallingAccount(); + final Long clusterId = cmd.getId(); + final String stateStr = cmd.getState(); + final String name = cmd.getName(); + final String keyword = cmd.getKeyword(); + final String scopeStr = cmd.getScope(); + List responsesList = new ArrayList<>(); + List permittedAccounts = new ArrayList<>(); + Ternary domainIdRecursiveListProject = + new Ternary<>(cmd.getDomainId(), cmd.isRecursive(), null); + accountManager.buildACLSearchParameters(caller, clusterId, cmd.getAccountName(), cmd.getProjectId(), + permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); + Long domainId = domainIdRecursiveListProject.first(); + Boolean isRecursive = domainIdRecursiveListProject.second(); + Project.ListProjectResourcesCriteria 
listProjectResourcesCriteria = domainIdRecursiveListProject.third(); + + + Filter searchFilter = new Filter(WebhookJoinVO.class, "id", true, cmd.getStartIndex(), + cmd.getPageSizeVal()); + SearchBuilder sb = webhookJoinDao.createSearchBuilder(); + accountManager.buildACLSearchBuilder(sb, domainId, isRecursive, permittedAccounts, + listProjectResourcesCriteria); + sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); + sb.and("keyword", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("state", sb.entity().getState(), SearchCriteria.Op.EQ); + sb.and("scope", sb.entity().getScope(), SearchCriteria.Op.EQ); + SearchCriteria sc = sb.create(); + accountManager.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, + listProjectResourcesCriteria); + Webhook.Scope scope = null; + if (StringUtils.isNotEmpty(scopeStr)) { + try { + scope = Webhook.Scope.valueOf(scopeStr); + } catch (IllegalArgumentException iae) { + throw new InvalidParameterValueException("Invalid scope specified"); + } + } + if ((Webhook.Scope.Global.equals(scope) && !Account.Type.ADMIN.equals(caller.getType())) || + (Webhook.Scope.Domain.equals(scope) && + !List.of(Account.Type.ADMIN, Account.Type.DOMAIN_ADMIN).contains(caller.getType()))) { + throw new InvalidParameterValueException(String.format("Scope %s can not be specified", scope)); + } + Webhook.State state = null; + if (StringUtils.isNotEmpty(stateStr)) { + try { + state = Webhook.State.valueOf(stateStr); + } catch (IllegalArgumentException iae) { + throw new InvalidParameterValueException("Invalid state specified"); + } + } + if (scope != null) { + sc.setParameters("scope", scope.name()); + } + if (state != null) { + sc.setParameters("state", state.name()); + } + if(keyword != null){ + sc.setParameters("keyword", "%" + keyword + "%"); + } + if (clusterId != null) { + sc.setParameters("id", clusterId); + } + if (name != null) { + sc.setParameters("name", name); + 
} + Pair, Integer> webhooksAndCount = webhookJoinDao.searchAndCount(sc, searchFilter); + for (WebhookJoinVO webhook : webhooksAndCount.first()) { + WebhookResponse response = createWebhookResponse(webhook); + responsesList.add(response); + } + ListResponse response = new ListResponse<>(); + response.setResponses(responsesList, webhooksAndCount.second()); + return response; + } + + @Override + public WebhookResponse createWebhook(CreateWebhookCmd cmd) throws CloudRuntimeException { + final Account owner = getOwner(cmd); + final String name = cmd.getName(); + final String description = cmd.getDescription(); + final String payloadUrl = getNormalizedPayloadUrl(cmd.getPayloadUrl()); + final String secretKey = cmd.getSecretKey(); + final boolean sslVerification = cmd.isSslVerification(); + final String scopeStr = cmd.getScope(); + final String stateStr = cmd.getState(); + Webhook.Scope scope = Webhook.Scope.Local; + if (StringUtils.isNotEmpty(scopeStr)) { + try { + scope = Webhook.Scope.valueOf(scopeStr); + } catch (IllegalArgumentException iae) { + throw new InvalidParameterValueException("Invalid scope specified"); + } + } + if ((Webhook.Scope.Global.equals(scope) && !Account.Type.ADMIN.equals(owner.getType())) || + (Webhook.Scope.Domain.equals(scope) && + !List.of(Account.Type.ADMIN, Account.Type.DOMAIN_ADMIN).contains(owner.getType()))) { + throw new InvalidParameterValueException( + String.format("Scope %s can not be specified for owner %s", scope, owner.getName())); + } + Webhook.State state = Webhook.State.Enabled; + if (StringUtils.isNotEmpty(stateStr)) { + try { + state = Webhook.State.valueOf(stateStr); + } catch (IllegalArgumentException iae) { + throw new InvalidParameterValueException("Invalid state specified"); + } + } + UriUtils.validateUrl(payloadUrl); + validateWebhookOwnerPayloadUrl(owner, payloadUrl, null); + URI uri = URI.create(payloadUrl); + if (sslVerification && !HttpConstants.HTTPS.equalsIgnoreCase(uri.getScheme())) { + throw new 
InvalidParameterValueException( + String.format("SSL verification can be specified only for HTTPS URLs, %s", payloadUrl)); + } + long domainId = owner.getDomainId(); + Long cmdDomainId = cmd.getDomainId(); + if (cmdDomainId != null && + List.of(Account.Type.ADMIN, Account.Type.DOMAIN_ADMIN).contains(owner.getType()) && + Webhook.Scope.Domain.equals(scope)) { + domainId = cmdDomainId; + } + WebhookVO webhook = new WebhookVO(name, description, state, domainId, owner.getId(), payloadUrl, secretKey, + sslVerification, scope); + webhook = webhookDao.persist(webhook); + return createWebhookResponse(webhook.getId()); + } + + @Override + public boolean deleteWebhook(DeleteWebhookCmd cmd) throws CloudRuntimeException { + final Account caller = CallContext.current().getCallingAccount(); + final long id = cmd.getId(); + Webhook webhook = webhookDao.findById(id); + if (webhook == null) { + throw new InvalidParameterValueException("Unable to find the webhook with the specified ID"); + } + accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, webhook); + return webhookDao.remove(id); + } + + @Override + public WebhookResponse updateWebhook(UpdateWebhookCmd cmd) throws CloudRuntimeException { + final Account caller = CallContext.current().getCallingAccount(); + final long id = cmd.getId(); + final String name = cmd.getName(); + final String description = cmd.getDescription(); + final String payloadUrl = getNormalizedPayloadUrl(cmd.getPayloadUrl()); + String secretKey = cmd.getSecretKey(); + final Boolean sslVerification = cmd.isSslVerification(); + final String scopeStr = cmd.getScope(); + final String stateStr = cmd.getState(); + WebhookVO webhook = webhookDao.findById(id); + if (webhook == null) { + throw new InvalidParameterValueException("Unable to find the webhook with the specified ID"); + } + accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, webhook); + boolean updateNeeded = false; + if 
(StringUtils.isNotBlank(name)) { + webhook.setName(name); + updateNeeded = true; + } + if (description != null) { + webhook.setDescription(description); + updateNeeded = true; + } + if (StringUtils.isNotEmpty(stateStr)) { + try { + Webhook.State state = Webhook.State.valueOf(stateStr); + webhook.setState(state); + updateNeeded = true; + } catch (IllegalArgumentException iae) { + throw new InvalidParameterValueException("Invalid state specified"); + } + } + Account owner = accountManager.getAccount(webhook.getAccountId()); + if (StringUtils.isNotEmpty(scopeStr)) { + try { + Webhook.Scope scope = Webhook.Scope.valueOf(scopeStr); + if ((Webhook.Scope.Global.equals(scope) && !Account.Type.ADMIN.equals(owner.getType())) || + (Webhook.Scope.Domain.equals(scope) && + !List.of(Account.Type.ADMIN, Account.Type.DOMAIN_ADMIN).contains(owner.getType()))) { + throw new InvalidParameterValueException( + String.format("Scope %s can not be specified for owner %s", scope, owner.getName())); + } + webhook.setScope(scope); + updateNeeded = true; + } catch (IllegalArgumentException iae) { + throw new InvalidParameterValueException("Invalid scope specified"); + } + } + URI uri = URI.create(webhook.getPayloadUrl()); + if (StringUtils.isNotEmpty(payloadUrl)) { + UriUtils.validateUrl(payloadUrl); + validateWebhookOwnerPayloadUrl(owner, payloadUrl, webhook); + uri = URI.create(payloadUrl); + webhook.setPayloadUrl(payloadUrl); + updateNeeded = true; + } + if (sslVerification != null) { + if (Boolean.TRUE.equals(sslVerification) && !HttpConstants.HTTPS.equalsIgnoreCase(uri.getScheme())) { + throw new InvalidParameterValueException( + String.format("SSL verification can be specified only for HTTPS URLs, %s", payloadUrl)); + } + webhook.setSslVerification(sslVerification); + updateNeeded = true; + } + if (secretKey != null) { + if (StringUtils.isBlank(secretKey)) { + secretKey = null; + } + webhook.setSecretKey(secretKey); + updateNeeded = true; + } + if (updateNeeded && !webhookDao.update(id, 
webhook)) { + return null; + } + return createWebhookResponse(webhook.getId()); + } + + @Override + public WebhookResponse createWebhookResponse(long webhookId) { + WebhookJoinVO webhookVO = webhookJoinDao.findById(webhookId); + return createWebhookResponse(webhookVO); + } + + @Override + public ListResponse listWebhookDeliveries(ListWebhookDeliveriesCmd cmd) { + final CallContext ctx = CallContext.current(); + final Account caller = ctx.getCallingAccount(); + final Long id = cmd.getId(); + final Long webhookId = cmd.getWebhookId(); + final Long managementServerId = cmd.getManagementServerId(); + final String keyword = cmd.getKeyword(); + final Date startDate = cmd.getStartDate(); + final Date endDate = cmd.getEndDate(); + final String eventType = cmd.getEventType(); + List responsesList = new ArrayList<>(); + ManagementServerHostVO host = basicWebhookDeliveryApiCheck(caller, id, webhookId, managementServerId, + startDate, endDate); + + Filter searchFilter = new Filter(WebhookDeliveryJoinVO.class, "id", false, cmd.getStartIndex(), + cmd.getPageSizeVal()); + List webhookIds = new ArrayList<>(); + if (webhookId != null) { + webhookIds.add(webhookId); + } else { + webhookIds.addAll(getIdsOfAccessibleWebhooks(caller)); + } + Pair, Integer> deliveriesAndCount = + webhookDeliveryJoinDao.searchAndCountByListApiParameters(id, webhookIds, + (host != null ? 
host.getMsid() : null), keyword, startDate, endDate, eventType, searchFilter); + for (WebhookDeliveryJoinVO delivery : deliveriesAndCount.first()) { + WebhookDeliveryResponse response = createWebhookDeliveryResponse(delivery); + responsesList.add(response); + } + ListResponse response = new ListResponse<>(); + response.setResponses(responsesList, deliveriesAndCount.second()); + return response; + } + + @Override + public int deleteWebhookDelivery(DeleteWebhookDeliveryCmd cmd) throws CloudRuntimeException { + final CallContext ctx = CallContext.current(); + final Account caller = ctx.getCallingAccount(); + final Long id = cmd.getId(); + final Long webhookId = cmd.getWebhookId(); + final Long managementServerId = cmd.getManagementServerId(); + final Date startDate = cmd.getStartDate(); + final Date endDate = cmd.getEndDate(); + ManagementServerHostVO host = basicWebhookDeliveryApiCheck(caller, id, webhookId, managementServerId, + startDate, endDate); + int removed = webhookDeliveryDao.deleteByDeleteApiParams(id, webhookId, + (host != null ? 
host.getMsid() : null), startDate, endDate); + logger.info("{} webhook deliveries removed", removed); + return removed; + } + + @Override + public WebhookDeliveryResponse executeWebhookDelivery(ExecuteWebhookDeliveryCmd cmd) throws CloudRuntimeException { + final CallContext ctx = CallContext.current(); + final Account caller = ctx.getCallingAccount(); + final Long deliveryId = cmd.getId(); + final Long webhookId = cmd.getWebhookId(); + final String payloadUrl = getNormalizedPayloadUrl(cmd.getPayloadUrl()); + final String secretKey = cmd.getSecretKey(); + final Boolean sslVerification = cmd.isSslVerification(); + final String payload = cmd.getPayload(); + final Account owner = accountManager.finalizeOwner(caller, null, null, null); + + if (ObjectUtils.allNull(deliveryId, webhookId) && StringUtils.isBlank(payloadUrl)) { + throw new InvalidParameterValueException(String.format("One of the %s, %s or %s must be specified", + ApiConstants.ID, ApiConstants.WEBHOOK_ID, ApiConstants.PAYLOAD_URL)); + } + WebhookDeliveryVO existingDelivery = null; + WebhookVO webhook = null; + if (deliveryId != null) { + existingDelivery = webhookDeliveryDao.findById(deliveryId); + if (existingDelivery == null) { + throw new InvalidParameterValueException("Invalid webhook delivery specified"); + } + webhook = webhookDao.findById(existingDelivery.getWebhookId()); + } + if (StringUtils.isNotBlank(payloadUrl)) { + UriUtils.validateUrl(payloadUrl); + } + if (webhookId != null) { + webhook = webhookDao.findById(webhookId); + if (webhook == null) { + throw new InvalidParameterValueException("Invalid webhook specified"); + } + if (StringUtils.isNotBlank(payloadUrl)) { + webhook.setPayloadUrl(payloadUrl); + } + if (StringUtils.isNotBlank(secretKey)) { + webhook.setSecretKey(secretKey); + } + if (sslVerification != null) { + webhook.setSslVerification(Boolean.TRUE.equals(sslVerification)); + } + } + if (ObjectUtils.allNull(deliveryId, webhookId)) { + webhook = new WebhookVO(owner.getDomainId(), 
owner.getId(), payloadUrl, secretKey, + Boolean.TRUE.equals(sslVerification)); + } + WebhookDelivery webhookDelivery = webhookService.executeWebhookDelivery(existingDelivery, webhook, payload); + if (webhookDelivery.getId() != WebhookDelivery.ID_DUMMY) { + return createWebhookDeliveryResponse(webhookDeliveryJoinDao.findById(webhookDelivery.getId())); + } + return createTestWebhookDeliveryResponse(webhookDelivery, webhook); + } + + @Override + public List> getCommands() { + List> cmdList = new ArrayList<>(); + cmdList.add(CreateWebhookCmd.class); + cmdList.add(ListWebhooksCmd.class); + cmdList.add(UpdateWebhookCmd.class); + cmdList.add(DeleteWebhookCmd.class); + cmdList.add(ListWebhookDeliveriesCmd.class); + cmdList.add(DeleteWebhookDeliveryCmd.class); + cmdList.add(ExecuteWebhookDeliveryCmd.class); + return cmdList; + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookDelivery.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookDelivery.java new file mode 100644 index 00000000000..b24891539f9 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookDelivery.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook; + +import java.util.Date; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + +public interface WebhookDelivery extends Identity, InternalIdentity { + public static final long ID_DUMMY = 0L; + public static final String TEST_EVENT_TYPE = "TEST.WEBHOOK"; + + long getId(); + long getEventId(); + long getWebhookId(); + long getManagementServerId(); + String getHeaders(); + String getPayload(); + boolean isSuccess(); + String getResponse(); + Date getStartTime(); + Date getEndTime(); +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookDeliveryThread.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookDeliveryThread.java new file mode 100644 index 00000000000..ac840c00be3 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookDeliveryThread.java @@ -0,0 +1,287 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.List; + +import javax.crypto.Mac; +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; + +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcContext; +import org.apache.cloudstack.framework.events.Event; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.commons.codec.DecoderException; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.httpclient.HttpStatus; +import org.apache.commons.lang3.StringUtils; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHeaders; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.TrustAllStrategy; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.message.BasicHeader; +import org.apache.http.ssl.SSLContextBuilder; +import org.apache.http.util.EntityUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +public class WebhookDeliveryThread implements Runnable { + protected static Logger LOGGER = 
LogManager.getLogger(WebhookDeliveryThread.class); + + private static final String HEADER_X_CS_EVENT_ID = "X-CS-Event-ID"; + private static final String HEADER_X_CS_EVENT = "X-CS-Event"; + private static final String HEADER_X_CS_SIGNATURE = "X-CS-Signature"; + private static final String PREFIX_HEADER_USER_AGENT = "CS-Hookshot/"; + private final Webhook webhook; + private final Event event; + private CloseableHttpClient httpClient; + private String headers; + private String payload; + private String response; + private Date startTime; + private int deliveryTries = 3; + private int deliveryTimeout = 10; + + AsyncCompletionCallback callback; + + protected boolean isValidJson(String json) { + try { + new JSONObject(json); + } catch (JSONException ex) { + try { + new JSONArray(json); + } catch (JSONException ex1) { + return false; + } + } + return true; + } + + protected void setHttpClient() throws NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + if (webhook.isSslVerification()) { + httpClient = HttpClients.createDefault(); + return; + } + httpClient = HttpClients + .custom() + .setSSLContext(new SSLContextBuilder().loadTrustMaterial(null, + TrustAllStrategy.INSTANCE).build()) + .setSSLHostnameVerifier(NoopHostnameVerifier.INSTANCE) + .build(); + } + + protected HttpPost getBasicHttpPostRequest() throws URISyntaxException { + final URI uri = new URI(webhook.getPayloadUrl()); + HttpPost request = new HttpPost(); + RequestConfig.Builder requestConfig = RequestConfig.custom(); + requestConfig.setConnectTimeout(deliveryTimeout * 1000); + requestConfig.setConnectionRequestTimeout(deliveryTimeout * 1000); + requestConfig.setSocketTimeout(deliveryTimeout * 1000); + request.setConfig(requestConfig.build()); + request.setURI(uri); + return request; + } + + protected void updateRequestHeaders(HttpPost request) throws DecoderException, NoSuchAlgorithmException, + InvalidKeyException { + request.addHeader(HEADER_X_CS_EVENT_ID, event.getEventUuid()); + 
request.addHeader(HEADER_X_CS_EVENT, event.getEventType()); + request.setHeader(HttpHeaders.USER_AGENT, String.format("%s%s", PREFIX_HEADER_USER_AGENT, + event.getResourceAccountUuid())); + if (StringUtils.isNotBlank(webhook.getSecretKey())) { + request.addHeader(HEADER_X_CS_SIGNATURE, generateHMACSignature(payload, webhook.getSecretKey())); + } + List
    headers = new ArrayList<>(Arrays.asList(request.getAllHeaders())); + HttpEntity entity = request.getEntity(); + if (entity.getContentLength() > 0 && !request.containsHeader(HttpHeaders.CONTENT_LENGTH)) { + headers.add(new BasicHeader(HttpHeaders.CONTENT_LENGTH, Long.toString(entity.getContentLength()))); + } + if (entity.getContentType() != null && !request.containsHeader(HttpHeaders.CONTENT_TYPE)) { + headers.add(entity.getContentType()); + } + if (entity.getContentEncoding() != null && !request.containsHeader(HttpHeaders.CONTENT_ENCODING)) { + headers.add(entity.getContentEncoding()); + } + this.headers = StringUtils.join(headers, "\n"); + } + + public WebhookDeliveryThread(Webhook webhook, Event event, + AsyncCompletionCallback callback) { + this.webhook = webhook; + this.event = event; + this.callback = callback; + } + + public void setDeliveryTries(int deliveryTries) { + this.deliveryTries = deliveryTries; + } + + public void setDeliveryTimeout(int deliveryTimeout) { + this.deliveryTimeout = deliveryTimeout; + } + + @Override + public void run() { + LOGGER.debug("Delivering event: {} for {}", event.getEventType(), webhook); + if (event == null) { + LOGGER.warn("Invalid event received for delivering to {}", webhook); + return; + } + payload = event.getDescription(); + LOGGER.trace("Payload: {}", payload); + int attempt = 0; + boolean success = false; + try { + setHttpClient(); + } catch (NoSuchAlgorithmException | KeyManagementException | KeyStoreException e) { + response = String.format("Failed to initiate delivery due to : %s", e.getMessage()); + callback.complete(new WebhookDeliveryResult(headers, payload, success, response, new Date())); + return; + } + while (attempt < deliveryTries) { + attempt++; + if (delivery(attempt)) { + success = true; + break; + } + } + callback.complete(new WebhookDeliveryResult(headers, payload, success, response, startTime)); + } + + protected void updateResponseFromRequest(HttpEntity entity) { + try { + this.response = 
EntityUtils.toString(entity, StandardCharsets.UTF_8); + } catch (IOException e) { + LOGGER.error("Failed to parse response for event: {} for {}", + event.getEventType(), webhook); + this.response = ""; + } + } + + protected boolean delivery(int attempt) { + startTime = new Date(); + try { + HttpPost request = getBasicHttpPostRequest(); + StringEntity input = new StringEntity(payload, + isValidJson(payload) ? ContentType.APPLICATION_JSON : ContentType.TEXT_PLAIN); + request.setEntity(input); + updateRequestHeaders(request); + LOGGER.trace("Delivering event: {} for {} with timeout: {}, " + + "attempt #{}", event.getEventType(), webhook, + deliveryTimeout, attempt); + final CloseableHttpResponse response = httpClient.execute(request); + updateResponseFromRequest(response.getEntity()); + if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { + LOGGER.trace("Successfully delivered event: {} for {}", + event.getEventType(), webhook); + return true; + } + } catch (URISyntaxException | IOException | DecoderException | NoSuchAlgorithmException | + InvalidKeyException e) { + LOGGER.warn("Failed to deliver {}, in attempt #{} due to: {}", + webhook, attempt, e.getMessage()); + response = String.format("Failed due to : %s", e.getMessage()); + } + return false; + } + + public static String generateHMACSignature(String data, String key) + throws InvalidKeyException, NoSuchAlgorithmException, DecoderException { + Mac mac = Mac.getInstance("HMACSHA256"); + SecretKey secretKey = new SecretKeySpec(key.getBytes(StandardCharsets.UTF_8), mac.getAlgorithm()); + mac.init(secretKey); + byte[] dataAsBytes = data.getBytes(StandardCharsets.UTF_8); + byte[] encodedText = mac.doFinal(dataAsBytes); + return new String(Base64.encodeBase64(encodedText)).trim(); + } + + public static class WebhookDeliveryContext extends AsyncRpcContext { + private final Long eventId; + private final Long ruleId; + + public WebhookDeliveryContext(AsyncCompletionCallback callback, Long eventId, Long 
ruleId) { + super(callback); + this.eventId = eventId; + this.ruleId = ruleId; + } + + public Long getEventId() { + return eventId; + } + + public Long getRuleId() { + return ruleId; + } + } + + public static class WebhookDeliveryResult extends CommandResult { + private final String headers; + private final String payload; + private final Date starTime; + private final Date endTime; + + public WebhookDeliveryResult(String headers, String payload, boolean success, String response, Date starTime) { + super(); + this.headers = headers; + this.payload = payload; + this.setResult(response); + this.setSuccess(success); + this.starTime = starTime; + this.endTime = new Date(); + } + + public String getHeaders() { + return headers; + } + + public String getPayload() { + return payload; + } + + public Date getStarTime() { + return starTime; + } + + public Date getEndTime() { + return endTime; + } + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookEventBus.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookEventBus.java new file mode 100644 index 00000000000..c2dade84361 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookEventBus.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.cloudstack.mom.webhook; + +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.framework.events.Event; +import org.apache.cloudstack.framework.events.EventBus; +import org.apache.cloudstack.framework.events.EventBusException; +import org.apache.cloudstack.framework.events.EventSubscriber; +import org.apache.cloudstack.framework.events.EventTopic; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.cloud.utils.component.ManagerBase; +import com.google.gson.Gson; + +public class WebhookEventBus extends ManagerBase implements EventBus { + + protected static Logger LOGGER = LogManager.getLogger(WebhookEventBus.class); + private static Gson gson; + + @Inject + WebhookService webhookService; + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + _name = name; + return true; + } + + @Override + public void setName(String name) { + _name = name; + } + + @Override + public UUID subscribe(EventTopic topic, EventSubscriber subscriber) throws EventBusException { + /* NOOP */ + return UUID.randomUUID(); + } + + @Override + public void unsubscribe(UUID subscriberId, EventSubscriber subscriber) throws EventBusException { + /* NOOP */ + } + + @Override + public void publish(Event event) throws EventBusException { + webhookService.handleEvent(event); + } + + @Override + public String getName() { + return _name; + } + + @Override + public boolean 
start() { + return true; + } + + @Override + public boolean stop() { + return true; + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookService.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookService.java new file mode 100644 index 00000000000..5a5aced288d --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookService.java @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook; + +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.events.Event; +import org.apache.cloudstack.framework.events.EventBusException; + +import com.cloud.utils.component.PluggableService; +import com.cloud.utils.exception.CloudRuntimeException; + +public interface WebhookService extends PluggableService, Configurable { + + ConfigKey WebhookDeliveryTimeout = new ConfigKey<>("Advanced", Integer.class, + "webhook.delivery.timeout", "10", + "Wait timeout (in seconds) for a webhook delivery delivery", + true, ConfigKey.Scope.Domain); + + ConfigKey WebhookDeliveryTries = new ConfigKey<>("Advanced", Integer.class, + "webhook.delivery.tries", "3", + "Number of tries to be made for a webhook delivery", + true, ConfigKey.Scope.Domain); + + ConfigKey WebhookDeliveryThreadPoolSize = new ConfigKey<>("Advanced", Integer.class, + "webhook.delivery.thread.pool.size", "5", + "Size of the thread pool for webhook deliveries", + false, ConfigKey.Scope.Global); + + ConfigKey WebhookDeliveriesLimit = new ConfigKey<>("Advanced", Integer.class, + "webhook.deliveries.limit", "10", + "Limit for the number of deliveries to keep in DB per webhook", + true, ConfigKey.Scope.Global); + + ConfigKey WebhookDeliveriesCleanupInitialDelay = new ConfigKey<>("Advanced", Integer.class, + "webhook.deliveries.cleanup.initial.delay", "180", + "Initial delay (in seconds) for webhook deliveries cleanup task", + false, ConfigKey.Scope.Global); + + ConfigKey WebhookDeliveriesCleanupInterval = new ConfigKey<>("Advanced", Integer.class, + "webhook.deliveries.cleanup.interval", "3600", + "Interval (in seconds) for cleaning up webhook deliveries", + false, ConfigKey.Scope.Global); + + void handleEvent(Event event) throws EventBusException; + WebhookDelivery executeWebhookDelivery(WebhookDelivery delivery, Webhook webhook, String payload) + throws 
CloudRuntimeException; +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java new file mode 100644 index 00000000000..58b265a99c0 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/WebhookServiceImpl.java @@ -0,0 +1,354 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.events.Event; +import org.apache.cloudstack.framework.events.EventBusException; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.mom.webhook.dao.WebhookDao; +import org.apache.cloudstack.mom.webhook.dao.WebhookDeliveryDao; +import org.apache.cloudstack.mom.webhook.vo.WebhookDeliveryVO; +import org.apache.cloudstack.mom.webhook.vo.WebhookVO; +import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.cloudstack.webhook.WebhookHelper; +import org.apache.commons.lang3.StringUtils; + +import com.cloud.api.query.vo.EventJoinVO; +import com.cloud.cluster.ManagementServerHostVO; +import com.cloud.cluster.dao.ManagementServerHostDao; +import com.cloud.domain.dao.DomainDao; +import com.cloud.event.EventCategory; +import com.cloud.event.dao.EventJoinDao; +import com.cloud.server.ManagementService; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.component.ManagerBase; +import 
com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.exception.CloudRuntimeException; + +public class WebhookServiceImpl extends ManagerBase implements WebhookService, WebhookHelper { + public static final String WEBHOOK_JOB_POOL_THREAD_PREFIX = "Webhook-Job-Executor"; + private ExecutorService webhookJobExecutor; + private ScheduledExecutorService webhookDeliveriesCleanupExecutor; + + @Inject + EventJoinDao eventJoinDao; + @Inject + WebhookDao webhookDao; + @Inject + protected WebhookDeliveryDao webhookDeliveryDao; + @Inject + ManagementServerHostDao managementServerHostDao; + @Inject + DomainDao domainDao; + @Inject + AccountManager accountManager; + + protected WebhookDeliveryThread getDeliveryJob(Event event, Webhook webhook, Pair configs) { + WebhookDeliveryThread.WebhookDeliveryContext context = + new WebhookDeliveryThread.WebhookDeliveryContext<>(null, event.getEventId(), webhook.getId()); + AsyncCallbackDispatcher caller = + AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().deliveryCompleteCallback(null, null)) + .setContext(context); + WebhookDeliveryThread job = new WebhookDeliveryThread(webhook, event, caller); + job = ComponentContext.inject(job); + job.setDeliveryTries(configs.first()); + job.setDeliveryTimeout(configs.second()); + return job; + } + + protected List getDeliveryJobs(Event event) throws EventBusException { + List jobs = new ArrayList<>(); + if (!EventCategory.ACTION_EVENT.getName().equals(event.getEventCategory())) { + return jobs; + } + if (event.getResourceAccountId() == null) { + logger.warn("Skipping delivering event [ID: {}, description: {}] to any webhook as account ID is missing", + event.getEventId(), event.getDescription()); + throw new EventBusException(String.format("Account missing for the event ID: %s", event.getEventUuid())); + } + List domainIds = new ArrayList<>(); + if (event.getResourceDomainId() != 
null) { + domainIds.add(event.getResourceDomainId()); + domainIds.addAll(domainDao.getDomainParentIds(event.getResourceDomainId())); + } + List webhooks = + webhookDao.listByEnabledForDelivery(event.getResourceAccountId(), domainIds); + Map> domainConfigs = new HashMap<>(); + for (WebhookVO webhook : webhooks) { + if (!domainConfigs.containsKey(webhook.getDomainId())) { + domainConfigs.put(webhook.getDomainId(), + new Pair<>(WebhookDeliveryTries.valueIn(webhook.getDomainId()), + WebhookDeliveryTimeout.valueIn(webhook.getDomainId()))); + } + Pair configs = domainConfigs.get(webhook.getDomainId()); + WebhookDeliveryThread job = getDeliveryJob(event, webhook, configs); + jobs.add(job); + } + return jobs; + } + + protected Runnable getManualDeliveryJob(WebhookDelivery existingDelivery, Webhook webhook, String payload, + AsyncCallFuture future) { + if (StringUtils.isBlank(payload)) { + payload = "{ \"CloudStack\": \"works!\" }"; + } + long eventId = Webhook.ID_DUMMY; + String eventType = WebhookDelivery.TEST_EVENT_TYPE; + String eventUuid = UUID.randomUUID().toString(); + String description = payload; + String resourceAccountUuid = null; + if (existingDelivery != null) { + EventJoinVO eventJoinVO = eventJoinDao.findById(existingDelivery.getEventId()); + eventId = eventJoinVO.getId(); + eventType = eventJoinVO.getType(); + eventUuid = eventJoinVO.getUuid(); + description = existingDelivery.getPayload(); + resourceAccountUuid = eventJoinVO.getAccountUuid(); + } else { + Account account = accountManager.getAccount(webhook.getAccountId()); + resourceAccountUuid = account.getUuid(); + } + Event event = new Event(ManagementService.Name, EventCategory.ACTION_EVENT.toString(), + eventType, null, null); + event.setEventId(eventId); + event.setEventUuid(eventUuid); + event.setDescription(description); + event.setResourceAccountUuid(resourceAccountUuid); + ManualDeliveryContext context = + new ManualDeliveryContext<>(null, webhook, future); + AsyncCallbackDispatcher caller = + 
AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().manualDeliveryCompleteCallback(null, null)) + .setContext(context); + WebhookDeliveryThread job = new WebhookDeliveryThread(webhook, event, caller); + job.setDeliveryTries(WebhookDeliveryTries.valueIn(webhook.getDomainId())); + job.setDeliveryTimeout(WebhookDeliveryTimeout.valueIn(webhook.getDomainId())); + return job; + } + + protected Void deliveryCompleteCallback( + AsyncCallbackDispatcher callback, + WebhookDeliveryThread.WebhookDeliveryContext context) { + WebhookDeliveryThread.WebhookDeliveryResult result = callback.getResult(); + WebhookDeliveryVO deliveryVO = new WebhookDeliveryVO(context.getEventId(), context.getRuleId(), + ManagementServerNode.getManagementServerId(), result.getHeaders(), result.getPayload(), + result.isSuccess(), result.getResult(), result.getStarTime(), result.getEndTime()); + webhookDeliveryDao.persist(deliveryVO); + return null; + } + + protected Void manualDeliveryCompleteCallback( + AsyncCallbackDispatcher callback, + ManualDeliveryContext context) { + WebhookDeliveryThread.WebhookDeliveryResult result = callback.getResult(); + context.future.complete(result); + return null; + } + + protected long cleanupOldWebhookDeliveries(long deliveriesLimit) { + Filter filter = new Filter(WebhookVO.class, "id", true, 0L, 50L); + Pair, Integer> webhooksAndCount = + webhookDao.searchAndCount(webhookDao.createSearchCriteria(), filter); + List webhooks = webhooksAndCount.first(); + long count = webhooksAndCount.second(); + long processed = 0; + do { + for (WebhookVO webhook : webhooks) { + webhookDeliveryDao.removeOlderDeliveries(webhook.getId(), deliveriesLimit); + processed++; + } + if (processed < count) { + filter.setOffset(processed); + webhooks = webhookDao.listAll(filter); + } + } while (processed < count); + return processed; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + try { + webhookJobExecutor = 
Executors.newFixedThreadPool(WebhookDeliveryThreadPoolSize.value(), + new NamedThreadFactory(WEBHOOK_JOB_POOL_THREAD_PREFIX)); + webhookDeliveriesCleanupExecutor = Executors.newScheduledThreadPool(1, + new NamedThreadFactory("Webhook-Deliveries-Cleanup-Worker")); + } catch (final Exception e) { + throw new ConfigurationException("Unable to to configure WebhookServiceImpl"); + } + return true; + } + + @Override + public boolean start() { + long webhookDeliveriesCleanupInitialDelay = WebhookDeliveriesCleanupInitialDelay.value(); + long webhookDeliveriesCleanupInterval = WebhookDeliveriesCleanupInterval.value(); + logger.debug("Scheduling webhook deliveries cleanup task with initial delay={}s and interval={}s", + webhookDeliveriesCleanupInitialDelay, webhookDeliveriesCleanupInterval); + webhookDeliveriesCleanupExecutor.scheduleWithFixedDelay(new WebhookDeliveryCleanupWorker(), + webhookDeliveriesCleanupInitialDelay, webhookDeliveriesCleanupInterval, TimeUnit.SECONDS); + return true; + } + + @Override + public boolean stop() { + webhookJobExecutor.shutdown(); + return true; + } + + @Override + public String getConfigComponentName() { + return WebhookService.class.getName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + WebhookDeliveryTimeout, + WebhookDeliveryTries, + WebhookDeliveryThreadPoolSize, + WebhookDeliveriesLimit, + WebhookDeliveriesCleanupInitialDelay, + WebhookDeliveriesCleanupInterval + }; + } + + @Override + public void deleteWebhooksForAccount(long accountId) { + webhookDao.deleteByAccount(accountId); + } + + @Override + public List listWebhooksByAccount(long accountId) { + return webhookDao.listByAccount(accountId); + } + + @Override + public void handleEvent(Event event) throws EventBusException { + List jobs = getDeliveryJobs(event); + for(Runnable job : jobs) { + webhookJobExecutor.submit(job); + } + } + + @Override + public WebhookDelivery executeWebhookDelivery(WebhookDelivery delivery, Webhook webhook, String 
payload) + throws CloudRuntimeException { + AsyncCallFuture future = new AsyncCallFuture<>(); + Runnable job = getManualDeliveryJob(delivery, webhook, payload, future); + webhookJobExecutor.submit(job); + WebhookDeliveryThread.WebhookDeliveryResult result = null; + WebhookDeliveryVO webhookDeliveryVO; + try { + result = future.get(); + if (delivery != null) { + webhookDeliveryVO = new WebhookDeliveryVO(delivery.getEventId(), delivery.getWebhookId(), + ManagementServerNode.getManagementServerId(), result.getHeaders(), result.getPayload(), + result.isSuccess(), result.getResult(), result.getStarTime(), result.getEndTime()); + webhookDeliveryVO = webhookDeliveryDao.persist(webhookDeliveryVO); + } else { + webhookDeliveryVO = new WebhookDeliveryVO(ManagementServerNode.getManagementServerId(), + result.getHeaders(), result.getPayload(), result.isSuccess(), result.getResult(), + result.getStarTime(), result.getEndTime()); + } + } catch (InterruptedException | ExecutionException e) { + logger.error(String.format("Failed to execute test webhook delivery due to: %s", e.getMessage()), e); + throw new CloudRuntimeException("Failed to execute test webhook delivery"); + } + return webhookDeliveryVO; + } + + @Override + public List> getCommands() { + return new ArrayList<>(); + } + + static public class ManualDeliveryContext extends AsyncRpcContext { + final Webhook webhook; + final AsyncCallFuture future; + + public ManualDeliveryContext(AsyncCompletionCallback callback, Webhook webhook, + AsyncCallFuture future) { + super(callback); + this.webhook = webhook; + this.future = future; + } + + } + + public class WebhookDeliveryCleanupWorker extends ManagedContextRunnable { + + protected void runCleanupForLongestRunningManagementServer() { + try { + ManagementServerHostVO msHost = managementServerHostDao.findOneByLongestRuntime(); + if (msHost == null || (msHost.getMsid() != ManagementServerNode.getManagementServerId())) { + logger.debug("Skipping the webhook delivery cleanup task 
on this management server"); + return; + } + long deliveriesLimit = WebhookDeliveriesLimit.value(); + logger.debug("Clearing old deliveries for webhooks with limit={} using management server {}", + deliveriesLimit, msHost.getMsid()); + long processed = cleanupOldWebhookDeliveries(deliveriesLimit); + logger.debug("Cleared old deliveries with limit={} for {} webhooks", deliveriesLimit, processed); + } catch (Exception e) { + logger.warn("Cleanup task failed to cleanup old webhook deliveries", e); + } + } + + @Override + protected void runInContext() { + GlobalLock gcLock = GlobalLock.getInternLock("WebhookDeliveriesCleanup"); + try { + if (gcLock.lock(3)) { + try { + runCleanupForLongestRunningManagementServer(); + } finally { + gcLock.unlock(); + } + } + } finally { + gcLock.releaseRef(); + } + } + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/CreateWebhookCmd.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/CreateWebhookCmd.java new file mode 100644 index 00000000000..d3d2cf18e1f --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/CreateWebhookCmd.java @@ -0,0 +1,167 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.cloudstack.mom.webhook.api.command.user;


import javax.inject.Inject;

import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.api.ACL;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.DomainResponse;
import org.apache.cloudstack.api.response.ProjectResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.mom.webhook.WebhookApiService;
import org.apache.cloudstack.mom.webhook.Webhook;
import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse;

import com.cloud.utils.exception.CloudRuntimeException;

/**
 * API command "createWebhook": validates and forwards the request to
 * {@link WebhookApiService#createWebhook}. All parameter semantics are carried
 * by the {@code @Parameter} annotations below; this class itself only holds
 * accessors and the execute() delegation.
 */
@APICommand(name = "createWebhook",
        description = "Creates a Webhook",
        responseObject = WebhookResponse.class,
        responseView = ResponseObject.ResponseView.Restricted,
        entityType = {Webhook.class},
        requestHasSensitiveInfo = false,
        responseHasSensitiveInfo = true,
        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User},
        since = "4.20.0")
public class CreateWebhookCmd extends BaseCmd {

    @Inject
    WebhookApiService webhookApiService;

    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

    @Parameter(name = ApiConstants.NAME, type = BaseCmd.CommandType.STRING, required = true, description = "Name for the Webhook")
    private String name;

    @Parameter(name = ApiConstants.DESCRIPTION, type = BaseCmd.CommandType.STRING, description = "Description for the Webhook")
    private String description;

    @Parameter(name = ApiConstants.STATE, type = BaseCmd.CommandType.STRING, description = "State of the Webhook")
    private String state;

    @ACL(accessType = SecurityChecker.AccessType.UseEntry)
    @Parameter(name = ApiConstants.ACCOUNT, type = BaseCmd.CommandType.STRING, description = "An optional account for the" +
            " Webhook. Must be used with domainId.")
    private String accountName;

    @ACL(accessType = SecurityChecker.AccessType.UseEntry)
    @Parameter(name = ApiConstants.DOMAIN_ID, type = BaseCmd.CommandType.UUID, entityType = DomainResponse.class,
            description = "an optional domainId for the Webhook. If the account parameter is used, domainId must also be used.")
    private Long domainId;

    @ACL(accessType = SecurityChecker.AccessType.UseEntry)
    @Parameter(name = ApiConstants.PROJECT_ID, type = BaseCmd.CommandType.UUID, entityType = ProjectResponse.class,
            description = "Project for the Webhook")
    private Long projectId;

    @Parameter(name = ApiConstants.PAYLOAD_URL,
            type = BaseCmd.CommandType.STRING,
            required = true,
            description = "Payload URL of the Webhook")
    private String payloadUrl;

    @Parameter(name = ApiConstants.SECRET_KEY, type = BaseCmd.CommandType.STRING, description = "Secret key of the Webhook")
    private String secretKey;

    @Parameter(name = ApiConstants.SSL_VERIFICATION, type = BaseCmd.CommandType.BOOLEAN, description = "If set to true then SSL verification will be done for the Webhook otherwise not")
    private Boolean sslVerification;

    // scope parameter is restricted to Admin/DomainAdmin via the authorized attribute
    @Parameter(name = ApiConstants.SCOPE, type = BaseCmd.CommandType.STRING, description = "Scope of the Webhook",
            authorized = {RoleType.Admin, RoleType.DomainAdmin})
    private String scope;

    /////////////////////////////////////////////////////
    /////////////////// Accessors ///////////////////////
    /////////////////////////////////////////////////////


    public String getName() {
        return name;
    }

    public String getDescription() {
        return description;
    }

    public String getState() {
        return state;
    }

    public String getAccountName() {
        return accountName;
    }

    public Long getDomainId() {
        return domainId;
    }

    public Long getProjectId() {
        return projectId;
    }

    public String getPayloadUrl() {
        return payloadUrl;
    }

    public String getSecretKey() {
        return secretKey;
    }

    // Null (parameter omitted) is treated as false.
    public boolean isSslVerification() {
        return Boolean.TRUE.equals(sslVerification);
    }

    public String getScope() {
        return scope;
    }

    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccountId();
    }

    /////////////////////////////////////////////////////
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

    /**
     * Delegates creation to the service; a null response or a
     * CloudRuntimeException is surfaced as an INTERNAL_ERROR API exception.
     */
    @Override
    public void execute() throws ServerApiException {
        try {
            WebhookResponse response = webhookApiService.createWebhook(this);
            if (response == null) {
                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create webhook");
            }
            response.setResponseName(getCommandName());
            setResponseObject(response);
        } catch (CloudRuntimeException ex) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
        }
    }
}
diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookCmd.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookCmd.java new file mode 100644 index 00000000000..c9fb01580c2 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookCmd.java @@ -0,0 +1,84 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.api.command.user; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.Webhook; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; + +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "deleteWebhook", + description = "Deletes a Webhook", + responseObject = SuccessResponse.class, + entityType = {Webhook.class}, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + since = "4.20.0") +public class DeleteWebhookCmd extends BaseCmd { + + @Inject + WebhookApiService webhookApiService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = 
WebhookResponse.class, + required = true, + description = "The ID of the Webhook") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getId() { + return id; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException { + try { + if (!webhookApiService.deleteWebhook(this)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to delete webhook ID: %d", getId())); + } + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookDeliveryCmd.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookDeliveryCmd.java new file mode 100644 index 00000000000..dcfe71bf171 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookDeliveryCmd.java @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.util.Date; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ManagementServerResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.WebhookDelivery; +import org.apache.cloudstack.mom.webhook.api.response.WebhookDeliveryResponse; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; + +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "deleteWebhookDelivery", + description = "Deletes Webhook delivery", + responseObject = SuccessResponse.class, + entityType = {WebhookDelivery.class}, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + since = "4.20.0") +public class DeleteWebhookDeliveryCmd extends BaseCmd { + + @Inject + WebhookApiService webhookApiService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = 
BaseCmd.CommandType.UUID, + entityType = WebhookDeliveryResponse.class, + description = "The ID of the Webhook delivery") + private Long id; + + @Parameter(name = ApiConstants.WEBHOOK_ID, type = BaseCmd.CommandType.UUID, + entityType = WebhookResponse.class, + description = "The ID of the Webhook") + private Long webhookId; + + @Parameter(name = ApiConstants.MANAGEMENT_SERVER_ID, type = BaseCmd.CommandType.UUID, + entityType = ManagementServerResponse.class, + description = "The ID of the management server", + authorized = {RoleType.Admin}) + private Long managementServerId; + + @Parameter(name = ApiConstants.START_DATE, + type = CommandType.DATE, + description = "The start date range for the Webhook delivery " + + "(use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\"). " + + "All deliveries having start date equal to or after the specified date will be considered.") + private Date startDate; + + @Parameter(name = ApiConstants.END_DATE, + type = CommandType.DATE, + description = "The end date range for the Webhook delivery " + + "(use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\"). 
" + + "All deliveries having end date equal to or before the specified date will be considered.") + private Date endDate; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getId() { + return id; + } + + public Long getWebhookId() { + return webhookId; + } + + public Long getManagementServerId() { + return managementServerId; + } + + public Date getStartDate() { + return startDate; + } + + public Date getEndDate() { + return endDate; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException { + try { + webhookApiService.deleteWebhookDelivery(this); + SuccessResponse response = new SuccessResponse(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ExecuteWebhookDeliveryCmd.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ExecuteWebhookDeliveryCmd.java new file mode 100644 index 00000000000..f31a5481376 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ExecuteWebhookDeliveryCmd.java @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.api.command.user; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.WebhookDelivery; +import org.apache.cloudstack.mom.webhook.api.response.WebhookDeliveryResponse; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; + +import com.cloud.utils.exception.CloudRuntimeException; + + +@APICommand(name = "executeWebhookDelivery", + description = "Executes a Webhook delivery", + responseObject = WebhookDeliveryResponse.class, + entityType = {WebhookDelivery.class}, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + since = "4.20.0") +public class ExecuteWebhookDeliveryCmd extends BaseCmd { + + @Inject + WebhookApiService webhookApiService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + 
///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = WebhookDeliveryResponse.class, + description = "The ID of the Webhook delivery for redelivery") + private Long id; + + @Parameter(name = ApiConstants.WEBHOOK_ID, type = CommandType.UUID, + entityType = WebhookResponse.class, + description = "The ID of the Webhook") + private Long webhookId; + + @Parameter(name = ApiConstants.PAYLOAD_URL, + type = BaseCmd.CommandType.STRING, + description = "Payload URL of the Webhook delivery") + private String payloadUrl; + + @Parameter(name = ApiConstants.SECRET_KEY, type = BaseCmd.CommandType.STRING, description = "Secret key of the Webhook delivery") + private String secretKey; + + @Parameter(name = ApiConstants.SSL_VERIFICATION, type = BaseCmd.CommandType.BOOLEAN, description = "If set to true then SSL verification will be done for the Webhook delivery otherwise not") + private Boolean sslVerification; + + @Parameter(name = ApiConstants.PAYLOAD, + type = BaseCmd.CommandType.STRING, + description = "Payload of the Webhook delivery") + private String payload; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public Long getId() { + return id; + } + + public Long getWebhookId() { + return webhookId; + } + + public String getPayloadUrl() { + return payloadUrl; + } + + public String getSecretKey() { + return secretKey; + } + + public Boolean isSslVerification() { + return sslVerification; + } + + public String getPayload() { + return payload; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws 
ServerApiException { + try { + WebhookDeliveryResponse response = webhookApiService.executeWebhookDelivery(this); + if (response == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to test Webhook delivery"); + } + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhookDeliveriesCmd.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhookDeliveriesCmd.java new file mode 100644 index 00000000000..466dad0d122 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhookDeliveriesCmd.java @@ -0,0 +1,125 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.util.Date; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ManagementServerResponse; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.WebhookDelivery; +import org.apache.cloudstack.mom.webhook.api.response.WebhookDeliveryResponse; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; + +@APICommand(name = "listWebhookDeliveries", + description = "Lists Webhook deliveries", + responseObject = WebhookDeliveryResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + entityType = {WebhookDelivery.class}, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + since = "4.20.0") +public class ListWebhookDeliveriesCmd extends BaseListCmd { + + @Inject + WebhookApiService webhookApiService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID, + entityType = WebhookDeliveryResponse.class, + description = "The ID of the Webhook delivery") + private Long id; + + @Parameter(name = ApiConstants.WEBHOOK_ID, type = BaseCmd.CommandType.UUID, + entityType = WebhookResponse.class, + description = "The ID of the Webhook") + private Long webhookId; + + @Parameter(name = ApiConstants.MANAGEMENT_SERVER_ID, type = BaseCmd.CommandType.UUID, + entityType = 
ManagementServerResponse.class, + description = "The ID of the management server", + authorized = {RoleType.Admin}) + private Long managementServerId; + + @Parameter(name = ApiConstants.START_DATE, + type = CommandType.DATE, + description = "The start date range for the Webhook delivery " + + "(use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\"). " + + "All deliveries having start date equal to or after the specified date will be listed.") + private Date startDate; + + @Parameter(name = ApiConstants.END_DATE, + type = CommandType.DATE, + description = "The end date range for the Webhook delivery " + + "(use format \"yyyy-MM-dd\" or \"yyyy-MM-dd HH:mm:ss\"). " + + "All deliveries having end date equal to or before the specified date will be listed.") + private Date endDate; + + @Parameter(name = ApiConstants.EVENT_TYPE, + type = CommandType.STRING, + description = "The event type of the Webhook delivery") + private String eventType; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getId() { + return id; + } + + public Long getWebhookId() { + return webhookId; + } + + public Long getManagementServerId() { + return managementServerId; + } + + public Date getStartDate() { + return startDate; + } + + public Date getEndDate() { + return endDate; + } + + public String getEventType() { + return eventType; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException { + ListResponse response = webhookApiService.listWebhookDeliveries(this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhooksCmd.java 
b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhooksCmd.java new file mode 100644 index 00000000000..6510c308f6e --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhooksCmd.java @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook.api.command.user; + + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListProjectAndAccountResourcesCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.Webhook; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; + +@APICommand(name = "listWebhooks", + description = "Lists Webhooks", + responseObject = WebhookResponse.class, + responseView = ResponseObject.ResponseView.Restricted, + entityType = {Webhook.class}, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + since = "4.20.0") +public class ListWebhooksCmd extends BaseListProjectAndAccountResourcesCmd { + + @Inject + WebhookApiService webhookApiService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = WebhookResponse.class, + description = "The ID of the Webhook") + private Long id; + + @Parameter(name = ApiConstants.STATE, type = CommandType.STRING, description = "The state of the Webhook") + private String state; + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "The name of the Webhook") + private String name; + + @Parameter(name = ApiConstants.SCOPE, + type = CommandType.STRING, + description = "The scope of the Webhook", + authorized = {RoleType.Admin, RoleType.DomainAdmin}) + private String scope; + + ///////////////////////////////////////////////////// + 
/////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getId() { + return id; + } + + public String getState() { + return state; + } + + public String getName() { + return name; + } + + public String getScope() { + return scope; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException { + ListResponse response = webhookApiService.listWebhooks(this); + response.setResponseName(getCommandName()); + setResponseObject(response); + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/UpdateWebhookCmd.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/UpdateWebhookCmd.java new file mode 100644 index 00000000000..c2be1d3f4fa --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/command/user/UpdateWebhookCmd.java @@ -0,0 +1,136 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook.api.command.user; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.Webhook; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; + +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "updateWebhook", + description = "Updates a Webhook", + responseObject = WebhookResponse.class, + entityType = {Webhook.class}, + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + since = "4.20.0") +public class UpdateWebhookCmd extends BaseCmd { + + @Inject + WebhookApiService webhookApiService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, + entityType = WebhookResponse.class, + required = true, + description = "The ID of the Webhook") + private Long id; + @Parameter(name = ApiConstants.NAME, type = BaseCmd.CommandType.STRING, description = "Name for the Webhook") + private String name; + + @Parameter(name = ApiConstants.DESCRIPTION, type = BaseCmd.CommandType.STRING, description = "Description for the Webhook") + private String description; + + @Parameter(name = ApiConstants.STATE, type = BaseCmd.CommandType.STRING, description = "State of the Webhook") + private String state; + + @Parameter(name = ApiConstants.PAYLOAD_URL, + type = 
BaseCmd.CommandType.STRING, + description = "Payload URL of the Webhook") + private String payloadUrl; + + @Parameter(name = ApiConstants.SECRET_KEY, type = BaseCmd.CommandType.STRING, description = "Secret key of the Webhook") + private String secretKey; + + @Parameter(name = ApiConstants.SSL_VERIFICATION, type = BaseCmd.CommandType.BOOLEAN, description = "If set to true then SSL verification will be done for the Webhook otherwise not") + private Boolean sslVerification; + + @Parameter(name = ApiConstants.SCOPE, type = BaseCmd.CommandType.STRING, description = "Scope of the Webhook", + authorized = {RoleType.Admin, RoleType.DomainAdmin}) + private String scope; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + public Long getId() { + return id; + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public String getState() { + return state; + } + + public String getPayloadUrl() { + return payloadUrl; + } + + public String getSecretKey() { + return secretKey; + } + + public Boolean isSslVerification() { + return sslVerification; + } + + public String getScope() { + return scope; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ServerApiException { + try { + WebhookResponse response = webhookApiService.updateWebhook(this); + if (response == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update Webhook"); + } + response.setResponseName(getCommandName()); + setResponseObject(response); + } catch (CloudRuntimeException ex) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, 
ex.getMessage()); + } + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/response/WebhookDeliveryResponse.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/response/WebhookDeliveryResponse.java new file mode 100644 index 00000000000..6463fe9b48b --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/api/response/WebhookDeliveryResponse.java @@ -0,0 +1,136 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.mom.webhook.api.response;

import java.util.Date;

import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
import org.apache.cloudstack.mom.webhook.WebhookDelivery;

import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;

/**
 * API response describing one delivery attempt of a webhook event.
 * The API layer populates this object through the setters; fields are
 * serialized to the client via the {@code @SerializedName} annotations.
 */
@EntityReference(value = {WebhookDelivery.class})
public class WebhookDeliveryResponse extends BaseResponse {
    @SerializedName(ApiConstants.ID)
    @Param(description = "The ID of the Webhook delivery")
    private String id;

    @SerializedName(ApiConstants.EVENT_ID)
    @Param(description = "The ID of the event")
    private String eventId;

    @SerializedName(ApiConstants.EVENT_TYPE)
    @Param(description = "The type of the event")
    private String eventType;

    @SerializedName(ApiConstants.WEBHOOK_ID)
    @Param(description = "The ID of the Webhook")
    private String webhookId;

    @SerializedName(ApiConstants.WEBHOOK_NAME)
    @Param(description = "The name of the Webhook")
    private String webhookName;

    @SerializedName(ApiConstants.MANAGEMENT_SERVER_ID)
    @Param(description = "The ID of the management server which executed delivery")
    private String managementServerId;

    @SerializedName(ApiConstants.MANAGEMENT_SERVER_NAME)
    @Param(description = "The name of the management server which executed delivery")
    private String managementServerName;

    @SerializedName(ApiConstants.HEADERS)
    @Param(description = "The headers of the webhook delivery")
    private String headers;

    @SerializedName(ApiConstants.PAYLOAD)
    @Param(description = "The payload of the webhook delivery")
    private String payload;

    @SerializedName(ApiConstants.SUCCESS)
    @Param(description = "Whether Webhook delivery succeeded or not")
    private boolean success;

    @SerializedName(ApiConstants.RESPONSE)
    @Param(description = "The response of the webhook delivery")
    private String response;

    @SerializedName(ApiConstants.START_DATE)
    @Param(description = "The start time of the Webhook delivery")
    private Date startTime;

    @SerializedName(ApiConstants.END_DATE)
    @Param(description = "The end time of the Webhook delivery")
    private Date endTime;

    // Plain write-only DTO: one compact setter per serialized field.

    public void setId(String id) { this.id = id; }

    public void setEventId(String eventId) { this.eventId = eventId; }

    public void setEventType(String eventType) { this.eventType = eventType; }

    public void setWebhookId(String webhookId) { this.webhookId = webhookId; }

    public void setWebhookName(String webhookName) { this.webhookName = webhookName; }

    public void setManagementServerId(String managementServerId) { this.managementServerId = managementServerId; }

    public void setManagementServerName(String managementServerName) { this.managementServerName = managementServerName; }

    public void setHeaders(String headers) { this.headers = headers; }

    public void setPayload(String payload) { this.payload = payload; }

    public void setSuccess(boolean success) { this.success = success; }

    public void setResponse(String response) { this.response = response; }

    public void setStartTime(Date startTime) { this.startTime = startTime; }

    public void setEndTime(Date endTime) { this.endTime = endTime; }
}
package org.apache.cloudstack.mom.webhook.api.response;

import java.util.Date;

import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
import org.apache.cloudstack.api.response.ControlledViewEntityResponse;
import org.apache.cloudstack.mom.webhook.Webhook;

import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;

/**
 * API response describing a Webhook and its ownership
 * (account / domain / project) information.
 */
@EntityReference(value = {Webhook.class})
public class WebhookResponse extends BaseResponse implements ControlledViewEntityResponse {
    @SerializedName(ApiConstants.ID)
    @Param(description = "The ID of the Webhook")
    private String id;

    @SerializedName(ApiConstants.NAME)
    @Param(description = "The name of the Webhook")
    private String name;

    @SerializedName(ApiConstants.DESCRIPTION)
    @Param(description = "The description of the Webhook")
    private String description;

    @SerializedName(ApiConstants.STATE)
    @Param(description = "The state of the Webhook")
    private String state;

    @SerializedName(ApiConstants.DOMAIN_ID)
    @Param(description = "The ID of the domain in which the Webhook exists")
    private String domainId;

    @SerializedName(ApiConstants.DOMAIN)
    @Param(description = "The name of the domain in which the Webhook exists")
    private String domainName;

    @SerializedName(ApiConstants.DOMAIN_PATH)
    @Param(description = "path of the domain to which the Webhook belongs")
    private String domainPath;

    @SerializedName(ApiConstants.ACCOUNT)
    @Param(description = "The account associated with the Webhook")
    private String accountName;

    // FIX: descriptions previously said "Kubernetes cluster" — a copy-paste
    // error from another response class; this response describes a Webhook.
    @SerializedName(ApiConstants.PROJECT_ID)
    @Param(description = "The project id of the Webhook")
    private String projectId;

    @SerializedName(ApiConstants.PROJECT)
    @Param(description = "The project name of the Webhook")
    private String projectName;

    @SerializedName(ApiConstants.PAYLOAD_URL)
    @Param(description = "The payload URL end point for the Webhook")
    private String payloadUrl;

    @SerializedName(ApiConstants.SECRET_KEY)
    @Param(description = "The secret key for the Webhook")
    private String secretKey;

    @SerializedName(ApiConstants.SSL_VERIFICATION)
    @Param(description = "Whether SSL verification is enabled for the Webhook")
    private boolean sslVerification;

    @SerializedName(ApiConstants.SCOPE)
    @Param(description = "The scope of the Webhook")
    private String scope;

    @SerializedName(ApiConstants.CREATED)
    @Param(description = "The date when this Webhook was created")
    private Date created;

    public void setId(String id) {
        this.id = id;
    }

    public void setName(String name) {
        this.name = name;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public void setState(String state) {
        this.state = state;
    }

    @Override
    public void setDomainId(String domainId) {
        this.domainId = domainId;
    }

    @Override
    public void setDomainPath(String domainPath) {
        this.domainPath = domainPath;
    }

    @Override
    public void setDomainName(String domainName) {
        this.domainName = domainName;
    }

    @Override
    public void setAccountName(String accountName) {
        this.accountName = accountName;
    }

    @Override
    public void setProjectId(String projectId) {
        this.projectId = projectId;
    }

    @Override
    public void setProjectName(String projectName) {
        this.projectName = projectName;
    }

    public void setPayloadUrl(String payloadUrl) {
        this.payloadUrl = payloadUrl;
    }

    public void setSecretKey(String secretKey) {
        this.secretKey = secretKey;
    }

    public void setSslVerification(boolean sslVerification) {
        this.sslVerification = sslVerification;
    }

    public void setScope(String scope) {
        this.scope = scope;
    }

    public void setCreated(Date created) {
        this.created = created;
    }
}
+ +package org.apache.cloudstack.mom.webhook.dao; + +import java.util.List; + +import org.apache.cloudstack.mom.webhook.vo.WebhookVO; + +import com.cloud.utils.db.GenericDao; + +public interface WebhookDao extends GenericDao { + List listByEnabledForDelivery(Long accountId, List domainIds); + void deleteByAccount(long accountId); + List listByAccount(long accountId); + WebhookVO findByAccountAndPayloadUrl(long accountId, String payloadUrl); +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDaoImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDaoImpl.java new file mode 100644 index 00000000000..2ef2269a9b9 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDaoImpl.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.mom.webhook.dao;

import java.util.List;
import java.util.Map;

import javax.naming.ConfigurationException;

import org.apache.cloudstack.mom.webhook.Webhook;
import org.apache.cloudstack.mom.webhook.vo.WebhookVO;
import org.apache.commons.collections.CollectionUtils;

import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;

/**
 * Default DAO implementation for {@link WebhookVO}.
 * FIX: generic type parameters were missing (raw {@code GenericDaoBase},
 * {@code SearchBuilder}, {@code SearchCriteria}); restored for type safety.
 * Query logic is unchanged.
 */
public class WebhookDaoImpl extends GenericDaoBase<WebhookVO, Long> implements WebhookDao {
    // Reusable criteria for account-scoped lookups; built once in configure().
    SearchBuilder<WebhookVO> accountIdSearch;

    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        super.configure(name, params);

        accountIdSearch = createSearchBuilder();
        accountIdSearch.and("accountId", accountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ);

        return true;
    }

    @Override
    public List<WebhookVO> listByEnabledForDelivery(Long accountId, List<Long> domainIds) {
        // Enabled webhooks whose scope matches: Global, OR Local to the given
        // account, OR Domain within any of the given domains. The or()/op()/cp()
        // calls build the nested OR group; its shape depends on which optional
        // parameters are present.
        SearchBuilder<WebhookVO> sb = createSearchBuilder();
        sb.and("state", sb.entity().getState(), SearchCriteria.Op.EQ);
        sb.and().op("scopeGlobal", sb.entity().getScope(), SearchCriteria.Op.EQ);
        if (accountId != null) {
            sb.or().op("scopeLocal", sb.entity().getScope(), SearchCriteria.Op.EQ);
            sb.and("accountId", sb.entity().getAccountId(), SearchCriteria.Op.EQ);
            sb.cp();
        }
        if (CollectionUtils.isNotEmpty(domainIds)) {
            sb.or().op("scopeDomain", sb.entity().getScope(), SearchCriteria.Op.EQ);
            sb.and("domainId", sb.entity().getDomainId(), SearchCriteria.Op.IN);
            sb.cp();
        }
        sb.cp();
        SearchCriteria<WebhookVO> sc = sb.create();
        sc.setParameters("state", Webhook.State.Enabled.name());
        sc.setParameters("scopeGlobal", Webhook.Scope.Global.name());
        if (accountId != null) {
            sc.setParameters("scopeLocal", Webhook.Scope.Local.name());
            sc.setParameters("accountId", accountId);
        }
        if (CollectionUtils.isNotEmpty(domainIds)) {
            sc.setParameters("scopeDomain", Webhook.Scope.Domain.name());
            sc.setParameters("domainId", domainIds.toArray());
        }
        return listBy(sc);
    }

    @Override
    public void deleteByAccount(long accountId) {
        SearchCriteria<WebhookVO> sc = accountIdSearch.create();
        sc.setParameters("accountId", accountId);
        remove(sc);
    }

    @Override
    public List<WebhookVO> listByAccount(long accountId) {
        SearchCriteria<WebhookVO> sc = accountIdSearch.create();
        sc.setParameters("accountId", accountId);
        return listBy(sc);
    }

    @Override
    public WebhookVO findByAccountAndPayloadUrl(long accountId, String payloadUrl) {
        SearchBuilder<WebhookVO> sb = createSearchBuilder();
        sb.and("accountId", sb.entity().getAccountId(), SearchCriteria.Op.EQ);
        sb.and("payloadUrl", sb.entity().getPayloadUrl(), SearchCriteria.Op.EQ);
        SearchCriteria<WebhookVO> sc = sb.create();
        sc.setParameters("accountId", accountId);
        sc.setParameters("payloadUrl", payloadUrl);
        return findOneBy(sc);
    }
}
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.dao; + +import java.util.Date; + +import org.apache.cloudstack.mom.webhook.vo.WebhookDeliveryVO; + +import com.cloud.utils.db.GenericDao; + +public interface WebhookDeliveryDao extends GenericDao { + int deleteByDeleteApiParams(Long id, Long webhookId, Long managementServerId, Date startDate, Date endDate); + void removeOlderDeliveries(long webhookId, long limit); +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDeliveryDaoImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDeliveryDaoImpl.java new file mode 100644 index 00000000000..088ed53772a --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDeliveryDaoImpl.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.mom.webhook.dao;

import java.util.Date;
import java.util.List;

import org.apache.cloudstack.mom.webhook.vo.WebhookDeliveryVO;

import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;

/**
 * Default DAO implementation for {@link WebhookDeliveryVO}.
 * FIX: generic type parameters restored, and {@link #removeOlderDeliveries}
 * now constrains deletion to the given webhook (see below).
 */
public class WebhookDeliveryDaoImpl extends GenericDaoBase<WebhookDeliveryVO, Long> implements WebhookDeliveryDao {
    @Override
    public int deleteByDeleteApiParams(Long id, Long webhookId, Long managementServerId, Date startDate,
            Date endDate) {
        // All parameters are optional; only non-null ones are bound, so unset
        // conditions drop out of the generated WHERE clause.
        SearchBuilder<WebhookDeliveryVO> sb = createSearchBuilder();
        sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
        sb.and("webhookId", sb.entity().getWebhookId(), SearchCriteria.Op.EQ);
        sb.and("managementServerId", sb.entity().getManagementServerId(), SearchCriteria.Op.EQ);
        sb.and("startDate", sb.entity().getStartTime(), SearchCriteria.Op.GTEQ);
        sb.and("endDate", sb.entity().getEndTime(), SearchCriteria.Op.LTEQ);
        SearchCriteria<WebhookDeliveryVO> sc = sb.create();
        if (id != null) {
            sc.setParameters("id", id);
        }
        if (webhookId != null) {
            sc.setParameters("webhookId", webhookId);
        }
        if (managementServerId != null) {
            sc.setParameters("managementServerId", managementServerId);
        }
        if (startDate != null) {
            sc.setParameters("startDate", startDate);
        }
        if (endDate != null) {
            sc.setParameters("endDate", endDate);
        }
        return remove(sc);
    }

    @Override
    public void removeOlderDeliveries(long webhookId, long limit) {
        // Find the newest `limit` deliveries of this webhook (descending id).
        Filter searchFilter = new Filter(WebhookDeliveryVO.class, "id", false, 0L, limit);
        SearchBuilder<WebhookDeliveryVO> sb = createSearchBuilder();
        sb.and("webhookId", sb.entity().getWebhookId(), SearchCriteria.Op.EQ);
        SearchCriteria<WebhookDeliveryVO> sc = sb.create();
        sc.setParameters("webhookId", webhookId);
        List<WebhookDeliveryVO> keep = listBy(sc, searchFilter);
        // BUG FIX: the delete criteria previously matched only `id NOT IN keep`,
        // which deleted deliveries belonging to EVERY other webhook as well.
        // Constrain the deletion to this webhook, and skip the NOTIN clause
        // entirely when there is nothing to keep.
        SearchBuilder<WebhookDeliveryVO> sbDelete = createSearchBuilder();
        sbDelete.and("webhookId", sbDelete.entity().getWebhookId(), SearchCriteria.Op.EQ);
        if (!keep.isEmpty()) {
            sbDelete.and("id", sbDelete.entity().getId(), SearchCriteria.Op.NOTIN);
        }
        SearchCriteria<WebhookDeliveryVO> scDelete = sbDelete.create();
        scDelete.setParameters("webhookId", webhookId);
        if (!keep.isEmpty()) {
            scDelete.setParameters("id", keep.stream().map(WebhookDeliveryVO::getId).toArray());
        }
        remove(scDelete);
    }
}
+ +package org.apache.cloudstack.mom.webhook.dao; + +import java.util.Date; +import java.util.List; + +import org.apache.cloudstack.mom.webhook.vo.WebhookDeliveryJoinVO; + +import com.cloud.utils.Pair; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDao; + +public interface WebhookDeliveryJoinDao extends GenericDao { + Pair, Integer> searchAndCountByListApiParameters(Long id, + List webhookIds, Long managementServerId, final String keyword, final Date startDate, + final Date endDate, final String eventType, Filter searchFilter); +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDeliveryJoinDaoImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDeliveryJoinDaoImpl.java new file mode 100644 index 00000000000..db84010fbc4 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookDeliveryJoinDaoImpl.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
package org.apache.cloudstack.mom.webhook.dao;

import java.util.Date;
import java.util.List;

import org.apache.cloudstack.mom.webhook.vo.WebhookDeliveryJoinVO;
import org.apache.commons.collections.CollectionUtils;

import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;

/**
 * Default DAO implementation for the {@code webhook_delivery_view} join view.
 * FIX: generic type parameters were missing (raw {@code GenericDaoBase},
 * {@code SearchBuilder}, {@code SearchCriteria}, {@code Pair});
 * restored for type safety. Query logic is unchanged.
 */
public class WebhookDeliveryJoinDaoImpl extends GenericDaoBase<WebhookDeliveryJoinVO, Long>
        implements WebhookDeliveryJoinDao {
    @Override
    public Pair<List<WebhookDeliveryJoinVO>, Integer> searchAndCountByListApiParameters(Long id,
            List<Long> webhookIds, Long managementServerId, String keyword, final Date startDate,
            final Date endDate, final String eventType, Filter searchFilter) {
        // All filters are optional; only non-null / non-empty ones are bound,
        // so unset conditions drop out of the generated WHERE clause.
        SearchBuilder<WebhookDeliveryJoinVO> sb = createSearchBuilder();
        sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ);
        sb.and("webhookId", sb.entity().getWebhookId(), SearchCriteria.Op.IN);
        sb.and("managementServerId", sb.entity().getManagementServerMsId(), SearchCriteria.Op.EQ);
        // Keyword search matches against the delivery payload only.
        sb.and("keyword", sb.entity().getPayload(), SearchCriteria.Op.LIKE);
        sb.and("startDate", sb.entity().getStartTime(), SearchCriteria.Op.GTEQ);
        sb.and("endDate", sb.entity().getEndTime(), SearchCriteria.Op.LTEQ);
        sb.and("eventType", sb.entity().getEventType(), SearchCriteria.Op.EQ);
        SearchCriteria<WebhookDeliveryJoinVO> sc = sb.create();
        if (id != null) {
            sc.setParameters("id", id);
        }
        if (CollectionUtils.isNotEmpty(webhookIds)) {
            sc.setParameters("webhookId", webhookIds.toArray());
        }
        if (managementServerId != null) {
            sc.setParameters("managementServerId", managementServerId);
        }
        if (keyword != null) {
            sc.setParameters("keyword", "%" + keyword + "%");
        }
        if (startDate != null) {
            sc.setParameters("startDate", startDate);
        }
        if (endDate != null) {
            sc.setParameters("endDate", endDate);
        }
        if (StringUtils.isNotBlank(eventType)) {
            sc.setParameters("eventType", eventType);
        }
        return searchAndCount(sc, searchFilter);
    }
}
+ +package org.apache.cloudstack.mom.webhook.dao; + +import java.util.List; + +import org.apache.cloudstack.mom.webhook.vo.WebhookJoinVO; + +import com.cloud.utils.db.GenericDao; + +public interface WebhookJoinDao extends GenericDao { + List listByAccountOrDomain(long accountId, String domainPath); +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookJoinDaoImpl.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookJoinDaoImpl.java new file mode 100644 index 00000000000..986e8bc2f19 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/dao/WebhookJoinDaoImpl.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook.dao; + +import java.util.List; + +import org.apache.cloudstack.mom.webhook.vo.WebhookJoinVO; +import org.apache.commons.lang3.StringUtils; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +public class WebhookJoinDaoImpl extends GenericDaoBase implements WebhookJoinDao { + @Override + public List listByAccountOrDomain(long accountId, String domainPath) { + SearchBuilder sb = createSearchBuilder(); + sb.and().op("accountId", sb.entity().getAccountId(), SearchCriteria.Op.EQ); + if (StringUtils.isNotBlank(domainPath)) { + sb.or("domainPath", sb.entity().getDomainPath(), SearchCriteria.Op.LIKE); + } + sb.cp(); + SearchCriteria sc = sb.create(); + sc.setParameters("accountId", accountId); + if (StringUtils.isNotBlank(domainPath)) { + sc.setParameters("domainPath", domainPath); + } + return listBy(sc); + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java new file mode 100644 index 00000000000..e36f870c8d9 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryJoinVO.java @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
package org.apache.cloudstack.mom.webhook.vo;

import java.util.Date;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;

import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;

import com.cloud.api.query.vo.BaseViewVO;

/**
 * Read-only view object mapped onto {@code webhook_delivery_view}, joining a
 * delivery with its event, webhook, and management-server details.
 * FIX: {@link #getId()} previously returned the constant {@code 0} instead of
 * the {@code id} column, breaking identity-based lookups on this view.
 */
@Entity
@Table(name = "webhook_delivery_view")
public class WebhookDeliveryJoinVO extends BaseViewVO implements InternalIdentity, Identity {

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "id")
    private long id;

    @Column(name = "uuid")
    private String uuid;

    @Column(name = "event_id")
    private long eventId;

    @Column(name = "event_uuid")
    private String eventUuid;

    @Column(name = "event_type")
    private String eventType;

    @Column(name = "webhook_id")
    private long webhookId;

    @Column(name = "webhook_uuid")
    private String webhookUuId;

    @Column(name = "webhook_name")
    private String webhookName;

    @Column(name = "mshost_id")
    private long managementServerId;

    @Column(name = "mshost_uuid")
    private String managementServerUuId;

    @Column(name = "mshost_msid")
    private long managementServerMsId;

    @Column(name = "mshost_name")
    private String managementServerName;

    @Column(name = "headers", length = 65535)
    private String headers;

    @Column(name = "payload", length = 65535)
    private String payload;

    @Column(name = "success")
    private boolean success;

    @Column(name = "response", length = 65535)
    private String response;

    @Column(name = "start_time")
    @Temporal(value = TemporalType.TIMESTAMP)
    private Date startTime;

    @Column(name = "end_time")
    @Temporal(value = TemporalType.TIMESTAMP)
    private Date endTime;

    @Override
    public long getId() {
        // BUG FIX: was `return 0;`, which made every row report the same id.
        return id;
    }

    @Override
    public String getUuid() {
        return uuid;
    }

    public long getEventId() {
        return eventId;
    }

    public String getEventUuid() {
        return eventUuid;
    }

    public String getEventType() {
        return eventType;
    }

    public long getWebhookId() {
        return webhookId;
    }

    public String getWebhookUuId() {
        return webhookUuId;
    }

    public String getWebhookName() {
        return webhookName;
    }

    public long getManagementServerId() {
        return managementServerId;
    }

    public String getManagementServerUuId() {
        return managementServerUuId;
    }

    public long getManagementServerMsId() {
        return managementServerMsId;
    }

    public String getManagementServerName() {
        return managementServerName;
    }

    public String getHeaders() {
        return headers;
    }

    public String getPayload() {
        return payload;
    }

    public boolean isSuccess() {
        return success;
    }

    public String getResponse() {
        return response;
    }

    public Date getStartTime() {
        return startTime;
    }

    public Date getEndTime() {
        return endTime;
    }

    @Override
    public String toString() {
        return String.format("WebhookDelivery [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(
                this, "id", "uuid", "webhookId", "startTime", "success"));
    }

    public WebhookDeliveryJoinVO() {
    }
}
b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java new file mode 100644 index 00000000000..e39f57a2663 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookDeliveryVO.java @@ -0,0 +1,174 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook.vo; + + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +import org.apache.cloudstack.mom.webhook.WebhookDelivery; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + +@Entity +@Table(name = "webhook_delivery") +public class WebhookDeliveryVO implements WebhookDelivery { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "event_id") + private long eventId; + + @Column(name = "webhook_id") + private long webhookId; + + @Column(name = "mshost_msid") + private long mangementServerId; + + @Column(name = "headers", length = 65535) + private String headers; + + @Column(name = "payload", length = 65535) + private String payload; + + @Column(name = "success") + private boolean success; + + @Column(name = "response", length = 65535) + private String response; + + @Column(name = "start_time") + @Temporal(value = TemporalType.TIMESTAMP) + private Date startTime; + + @Column(name = "end_time") + @Temporal(value = TemporalType.TIMESTAMP) + private Date endTime; + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getEventId() { + return eventId; + } + + @Override + public long getWebhookId() { + return webhookId; + } + + @Override + public long getManagementServerId() { + return mangementServerId; + } + + public String getHeaders() { + return headers; + } + + @Override + public String getPayload() { + return payload; + } + + @Override + public boolean isSuccess() { + return success; + } 
+ + @Override + public String getResponse() { + return response; + } + + @Override + public Date getStartTime() { + return startTime; + } + + @Override + public Date getEndTime() { + return endTime; + } + + @Override + public String toString() { + return String.format("WebhookDelivery [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "webhookId", "startTime", "success")); + } + + public WebhookDeliveryVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public WebhookDeliveryVO(long eventId, long webhookId, long managementServerId, String headers, String payload, + boolean success, String response, Date startTime, Date endTime) { + this.uuid = UUID.randomUUID().toString(); + this.eventId = eventId; + this.webhookId = webhookId; + this.mangementServerId = managementServerId; + this.headers = headers; + this.payload = payload; + this.success = success; + this.response = response; + this.startTime = startTime; + this.endTime = endTime; + } + + + + /* + * For creating a dummy object for testing delivery + */ + public WebhookDeliveryVO(long managementServerId, String headers, String payload, boolean success, + String response, Date startTime, Date endTime) { + this.id = WebhookDelivery.ID_DUMMY; + this.uuid = UUID.randomUUID().toString(); + this.eventId = WebhookDelivery.ID_DUMMY; + this.webhookId = WebhookDelivery.ID_DUMMY; + this.mangementServerId = managementServerId; + this.headers = headers; + this.payload = payload; + this.success = success; + this.response = response; + this.startTime = startTime; + this.endTime = endTime; + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java new file mode 100644 index 00000000000..f1708609587 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookJoinVO.java @@ -0,0 +1,234 @@ +// 
Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.vo; + + +import java.util.Date; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.mom.webhook.Webhook; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + +import com.cloud.api.query.vo.ControlledViewEntity; +import com.cloud.user.Account; +import com.cloud.utils.db.Encrypt; +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "webhook_view") +public class WebhookJoinVO implements ControlledViewEntity { + + @Id + @Column(name = "id", updatable = false, nullable = false) + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "name") + private String name; + + @Column(name = "description", length = 4096) + private String description; + + @Column(name = "state") + @Enumerated(value = EnumType.STRING) + private Webhook.State state; + + @Column(name = "payload_url") + private String payloadUrl; + + @Column(name = "secret_key") + @Encrypt + private String 
secretKey; + + @Column(name = "ssl_verification") + private boolean sslVerification; + + @Column(name = "scope") + @Enumerated(value = EnumType.STRING) + private Webhook.Scope scope; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + @Column(name = GenericDao.REMOVED_COLUMN) + private Date removed; + + @Column(name = "account_id") + private long accountId; + + @Column(name = "account_uuid") + private String accountUuid; + + @Column(name = "account_name") + private String accountName; + + @Column(name = "account_type") + @Enumerated(value = EnumType.STRING) + private Account.Type accountType; + + @Column(name = "domain_id") + private long domainId; + + @Column(name = "domain_uuid") + private String domainUuid; + + @Column(name = "domain_name") + private String domainName; + + @Column(name = "domain_path") + private String domainPath; + + @Column(name = "project_id") + private long projectId; + + @Column(name = "project_uuid") + private String projectUuid; + + @Column(name = "project_name") + private String projectName; + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public Webhook.State getState() { + return state; + } + + public String getPayloadUrl() { + return payloadUrl; + } + + public void setPayloadUrl(String payloadUrl) { + this.payloadUrl = payloadUrl; + } + + public String getSecretKey() { + return secretKey; + } + + public Webhook.Scope getScope() { + return scope; + } + + public boolean isSslVerification() { + return sslVerification; + } + + public Date getCreated() { + return created; + } + + public Date getRemoved() { + return removed; + } + + @Override + public long getDomainId() { + return 
domainId; + } + + @Override + public String getDomainPath() { + return domainPath; + } + + @Override + public String getDomainUuid() { + return domainUuid; + } + + @Override + public String getDomainName() { + return domainName; + } + + @Override + public Account.Type getAccountType() { + return accountType; + } + + @Override + public long getAccountId() { + return accountId; + } + + @Override + public String getAccountUuid() { + return accountUuid; + } + + @Override + public String getAccountName() { + return accountName; + } + + @Override + public String getProjectUuid() { + return projectUuid; + } + + @Override + public String getProjectName() { + return projectName; + } + + @Override + public Class getEntityType() { + return Webhook.class; + } + + @Override + public String toString() { + return String.format("Webhook [%s]", ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name")); + } + + public WebhookJoinVO() { + } +} diff --git a/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java new file mode 100644 index 00000000000..93e3e801423 --- /dev/null +++ b/plugins/event-bus/webhook/src/main/java/org/apache/cloudstack/mom/webhook/vo/WebhookVO.java @@ -0,0 +1,232 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.vo; + + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.mom.webhook.Webhook; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + +import com.cloud.utils.db.Encrypt; +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "webhook") +public class WebhookVO implements Webhook { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "name") + private String name; + + @Column(name = "description", length = 4096) + private String description; + + @Column(name = "state") + @Enumerated(value = EnumType.STRING) + private State state; + + @Column(name = "domain_id") + private long domainId; + + @Column(name = "account_id") + private long accountId; + + @Column(name = "payload_url") + private String payloadUrl; + + @Column(name = "secret_key") + @Encrypt + private String secretKey; + + @Column(name = "ssl_verification") + private boolean sslVerification; + + @Column(name = "scope") + @Enumerated(value = EnumType.STRING) + private Scope scope; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + 
@Column(name = GenericDao.REMOVED_COLUMN) + private Date removed; + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + @Override + public State getState() { + return state; + } + + public void setState(State state) { + this.state = state; + } + + @Override + public long getDomainId() { + return domainId; + } + + public void setDomainId(long domainId) { + this.domainId = domainId; + } + + @Override + public long getAccountId() { + return accountId; + } + + public void setAccountId(long accountId) { + this.accountId = accountId; + } + + @Override + public String getPayloadUrl() { + return payloadUrl; + } + + public void setPayloadUrl(String payloadUrl) { + this.payloadUrl = payloadUrl; + } + + @Override + public String getSecretKey() { + return secretKey; + } + + public void setSecretKey(String secretKey) { + this.secretKey = secretKey; + } + + @Override + public Scope getScope() { + return scope; + } + + public void setScope(Scope scope) { + this.scope = scope; + } + + @Override + public boolean isSslVerification() { + return sslVerification; + } + + public void setSslVerification(boolean sslVerification) { + this.sslVerification = sslVerification; + } + + @Override + public Date getCreated() { + return created; + } + + public Date getRemoved() { + return removed; + } + + @Override + public Class getEntityType() { + return Webhook.class; + } + + @Override + public String toString() { + return String.format("Webhook [%s]",ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "id", "uuid", "name", "payloadUrl")); + } + + public WebhookVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public 
WebhookVO(String name, String description, State state, long domainId, long accountId, + String payloadUrl, String secretKey, boolean sslVerification, Scope scope) { + this.uuid = UUID.randomUUID().toString(); + this.name = name; + this.description = description; + this.state = state; + this.domainId = domainId; + this.accountId = accountId; + this.payloadUrl = payloadUrl; + this.secretKey = secretKey; + this.sslVerification = sslVerification; + this.scope = scope; + } + + /* + * For creating a dummy rule for testing delivery + */ + public WebhookVO(long domainId, long accountId, String payloadUrl, String secretKey, boolean sslVerification) { + this.uuid = UUID.randomUUID().toString(); + this.id = ID_DUMMY; + this.name = NAME_DUMMY; + this.description = NAME_DUMMY; + this.state = State.Enabled; + this.domainId = domainId; + this.accountId = accountId; + this.payloadUrl = payloadUrl; + this.secretKey = secretKey; + this.sslVerification = sslVerification; + this.scope = Scope.Local; + } +} diff --git a/plugins/event-bus/webhook/src/main/resources/META-INF/cloudstack/webhook/module.properties b/plugins/event-bus/webhook/src/main/resources/META-INF/cloudstack/webhook/module.properties new file mode 100644 index 00000000000..299144ff82a --- /dev/null +++ b/plugins/event-bus/webhook/src/main/resources/META-INF/cloudstack/webhook/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=webhook +parent=event diff --git a/plugins/event-bus/webhook/src/main/resources/META-INF/cloudstack/webhook/spring-event-webhook-context.xml b/plugins/event-bus/webhook/src/main/resources/META-INF/cloudstack/webhook/spring-event-webhook-context.xml new file mode 100644 index 00000000000..22f688c781f --- /dev/null +++ b/plugins/event-bus/webhook/src/main/resources/META-INF/cloudstack/webhook/spring-event-webhook-context.xml @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookApiServiceImplTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookApiServiceImplTest.java new file mode 100644 index 00000000000..dff35806984 --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookApiServiceImplTest.java @@ -0,0 +1,253 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.mom.webhook; + +import java.util.List; + +import org.apache.cloudstack.acl.SecurityChecker; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.api.command.user.DeleteWebhookCmd; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; +import org.apache.cloudstack.mom.webhook.dao.WebhookDao; +import org.apache.cloudstack.mom.webhook.dao.WebhookJoinDao; +import org.apache.cloudstack.mom.webhook.vo.WebhookJoinVO; +import org.apache.cloudstack.mom.webhook.vo.WebhookVO; +import org.apache.commons.collections.CollectionUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.api.ApiResponseHelper; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; + +@RunWith(MockitoJUnitRunner.class) +public class WebhookApiServiceImplTest { + + @Mock + WebhookDao webhookDao; + @Mock + WebhookJoinDao webhookJoinDao; + @Mock + AccountManager accountManager; + + @Mock + DomainDao domainDao; + + @InjectMocks + WebhookApiServiceImpl webhookApiServiceImpl = new WebhookApiServiceImpl(); + + private 
WebhookJoinVO prepareTestWebhookJoinVO() { + String name = "webhook"; + String description = "webhook-description"; + Webhook.State state = Webhook.State.Enabled; + String payloadUrl = "url"; + String secretKey = "key"; + boolean sslVerification = false; + Webhook.Scope scope = Webhook.Scope.Local; + WebhookJoinVO webhookJoinVO = new WebhookJoinVO(); + ReflectionTestUtils.setField(webhookJoinVO, "name", name); + ReflectionTestUtils.setField(webhookJoinVO, "description", description); + ReflectionTestUtils.setField(webhookJoinVO, "state", state); + ReflectionTestUtils.setField(webhookJoinVO, "payloadUrl", payloadUrl); + ReflectionTestUtils.setField(webhookJoinVO, "secretKey", secretKey); + ReflectionTestUtils.setField(webhookJoinVO, "sslVerification", sslVerification); + ReflectionTestUtils.setField(webhookJoinVO, "scope", scope); + return webhookJoinVO; + } + + private void validateWebhookResponseWithWebhookJoinVO(WebhookResponse response, WebhookJoinVO webhookJoinVO) { + Assert.assertEquals(webhookJoinVO.getName(), ReflectionTestUtils.getField(response, "name")); + Assert.assertEquals(webhookJoinVO.getDescription(), ReflectionTestUtils.getField(response, "description")); + Assert.assertEquals(webhookJoinVO.getState().toString(), ReflectionTestUtils.getField(response, "state")); + Assert.assertEquals(webhookJoinVO.getPayloadUrl(), ReflectionTestUtils.getField(response, "payloadUrl")); + Assert.assertEquals(webhookJoinVO.getSecretKey(), ReflectionTestUtils.getField(response, "secretKey")); + Assert.assertEquals(webhookJoinVO.isSslVerification(), ReflectionTestUtils.getField(response, "sslVerification")); + Assert.assertEquals(webhookJoinVO.getScope().toString(), ReflectionTestUtils.getField(response, "scope")); + } + + @Test + public void testCreateWebhookResponse() { + WebhookJoinVO webhookJoinVO = prepareTestWebhookJoinVO(); + try (MockedStatic mockedApiResponseHelper = Mockito.mockStatic(ApiResponseHelper.class)) { + WebhookResponse response = 
webhookApiServiceImpl.createWebhookResponse(webhookJoinVO); + validateWebhookResponseWithWebhookJoinVO(response, webhookJoinVO); + } + } + + @Test + public void testCreateWebhookResponseId() { + WebhookJoinVO webhookJoinVO = prepareTestWebhookJoinVO(); + long id = 1L; + Mockito.when(webhookJoinDao.findById(id)).thenReturn(webhookJoinVO); + try (MockedStatic mockedApiResponseHelper = Mockito.mockStatic(ApiResponseHelper.class)) { + WebhookResponse response = webhookApiServiceImpl.createWebhookResponse(id); + validateWebhookResponseWithWebhookJoinVO(response, webhookJoinVO); + } + } + + @Test + public void testGetIdsOfAccessibleWebhooksAdmin() { + Account account = Mockito.mock(Account.class); + Mockito.when(account.getType()).thenReturn(Account.Type.ADMIN); + Assert.assertTrue(CollectionUtils.isEmpty(webhookApiServiceImpl.getIdsOfAccessibleWebhooks(account))); + } + + @Test + public void testGetIdsOfAccessibleWebhooksDomainAdmin() { + Long accountId = 1L; + Account account = Mockito.mock(Account.class); + Mockito.when(account.getType()).thenReturn(Account.Type.DOMAIN_ADMIN); + Mockito.when(account.getDomainId()).thenReturn(1L); + Mockito.when(account.getId()).thenReturn(accountId); + String domainPath = "d1"; + DomainVO domain = Mockito.mock(DomainVO.class); + Mockito.when(domain.getPath()).thenReturn(domainPath); + Mockito.when(domainDao.findById(1L)).thenReturn(domain); + WebhookJoinVO webhookJoinVO = Mockito.mock(WebhookJoinVO.class); + Mockito.when(webhookJoinVO.getId()).thenReturn(1L); + Mockito.when(webhookJoinDao.listByAccountOrDomain(accountId, domainPath)).thenReturn(List.of(webhookJoinVO)); + List result = webhookApiServiceImpl.getIdsOfAccessibleWebhooks(account); + Assert.assertTrue(CollectionUtils.isNotEmpty(result)); + Assert.assertEquals(1, result.size()); + } + + @Test + public void testGetIdsOfAccessibleWebhooksNormalUser() { + Long accountId = 1L; + Account account = Mockito.mock(Account.class); + 
Mockito.when(account.getType()).thenReturn(Account.Type.NORMAL); + Mockito.when(account.getId()).thenReturn(accountId); + WebhookJoinVO webhookJoinVO = Mockito.mock(WebhookJoinVO.class); + Mockito.when(webhookJoinVO.getId()).thenReturn(1L); + Mockito.when(webhookJoinDao.listByAccountOrDomain(accountId, null)).thenReturn(List.of(webhookJoinVO)); + List result = webhookApiServiceImpl.getIdsOfAccessibleWebhooks(account); + Assert.assertTrue(CollectionUtils.isNotEmpty(result)); + Assert.assertEquals(1, result.size()); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteWebhookInvalidWebhook() { + try (MockedStatic callContextMocked = Mockito.mockStatic(CallContext.class)) { + DeleteWebhookCmd cmd = Mockito.mock(DeleteWebhookCmd.class); + Mockito.when(cmd.getId()).thenReturn(1L); + CallContext callContextMock = Mockito.mock(CallContext.class); + callContextMocked.when(CallContext::current).thenReturn(callContextMock); + webhookApiServiceImpl.deleteWebhook(cmd); + } + } + + @Test(expected = PermissionDeniedException.class) + public void testDeleteWebhookNoPermission() { + try (MockedStatic callContextMocked = Mockito.mockStatic(CallContext.class)) { + DeleteWebhookCmd cmd = Mockito.mock(DeleteWebhookCmd.class); + Mockito.when(cmd.getId()).thenReturn(1L); + WebhookVO webhookVO = Mockito.mock(WebhookVO.class); + Mockito.when(webhookDao.findById(1L)).thenReturn(webhookVO); + CallContext callContextMock = Mockito.mock(CallContext.class); + Account account = Mockito.mock(Account.class); + Mockito.when(callContextMock.getCallingAccount()).thenReturn(account); + callContextMocked.when(CallContext::current).thenReturn(callContextMock); + Mockito.doThrow(PermissionDeniedException.class).when(accountManager).checkAccess(account, + SecurityChecker.AccessType.OperateEntry, false, webhookVO); + webhookApiServiceImpl.deleteWebhook(cmd); + } + } + + @Test + public void testDeleteWebhook() { + try (MockedStatic callContextMocked = 
Mockito.mockStatic(CallContext.class)) { + DeleteWebhookCmd cmd = Mockito.mock(DeleteWebhookCmd.class); + Mockito.when(cmd.getId()).thenReturn(1L); + WebhookVO webhookVO = Mockito.mock(WebhookVO.class); + Mockito.when(webhookDao.findById(1L)).thenReturn(webhookVO); + CallContext callContextMock = Mockito.mock(CallContext.class); + Account account = Mockito.mock(Account.class); + Mockito.when(callContextMock.getCallingAccount()).thenReturn(account); + callContextMocked.when(CallContext::current).thenReturn(callContextMock); + Mockito.doNothing().when(accountManager).checkAccess(account, + SecurityChecker.AccessType.OperateEntry, false, webhookVO); + Mockito.doReturn(true).when(webhookDao).remove(Mockito.anyLong()); + Assert.assertTrue(webhookApiServiceImpl.deleteWebhook(cmd)); + } + } + + @Test + public void testValidateWebhookOwnerPayloadUrlNonExistent() { + Mockito.when(webhookDao.findByAccountAndPayloadUrl(Mockito.anyLong(), Mockito.anyString())).thenReturn(null); + Account account = Mockito.mock(Account.class); + String url = "url"; + webhookApiServiceImpl.validateWebhookOwnerPayloadUrl(account, url, null); + webhookApiServiceImpl.validateWebhookOwnerPayloadUrl(account, url, Mockito.mock(Webhook.class)); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidateWebhookOwnerPayloadUrlCreateExist() { + Mockito.when(webhookDao.findByAccountAndPayloadUrl(Mockito.anyLong(), Mockito.anyString())) + .thenReturn(Mockito.mock(WebhookVO.class)); + webhookApiServiceImpl.validateWebhookOwnerPayloadUrl(Mockito.mock(Account.class), "url", + null); + } + + private Webhook mockWebhook(long id) { + Webhook webhook = Mockito.mock(Webhook.class); + Mockito.when(webhook.getId()).thenReturn(id); + return webhook; + } + + private WebhookVO mockWebhookVO(long id) { + WebhookVO webhook = Mockito.mock(WebhookVO.class); + Mockito.when(webhook.getId()).thenReturn(id); + return webhook; + } + + @Test + public void 
testValidateWebhookOwnerPayloadUrlUpdateSameExist() { + WebhookVO webhookVO = mockWebhookVO(1L); + Mockito.when(webhookDao.findByAccountAndPayloadUrl(Mockito.anyLong(), Mockito.anyString())) + .thenReturn(webhookVO); + webhookApiServiceImpl.validateWebhookOwnerPayloadUrl(Mockito.mock(Account.class), "url", + mockWebhook(1L)); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidateWebhookOwnerPayloadUrlUpdateDifferentExist() { + WebhookVO webhookVO = mockWebhookVO(2L); + Mockito.when(webhookDao.findByAccountAndPayloadUrl(Mockito.anyLong(), Mockito.anyString())) + .thenReturn(webhookVO); + webhookApiServiceImpl.validateWebhookOwnerPayloadUrl(Mockito.mock(Account.class), "url", + mockWebhook(1L)); + } + + @Test + public void testGetNormalizedPayloadUrl() { + Assert.assertEquals("http://abc.com", webhookApiServiceImpl.getNormalizedPayloadUrl("abc.com")); + Assert.assertEquals("http://abc.com", webhookApiServiceImpl.getNormalizedPayloadUrl("http://abc.com")); + Assert.assertEquals("https://abc.com", + webhookApiServiceImpl.getNormalizedPayloadUrl("https://abc.com")); + } +} diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookDeliveryThreadTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookDeliveryThreadTest.java new file mode 100644 index 00000000000..3be8dee5c2e --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookDeliveryThreadTest.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook; + +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; + +import org.apache.commons.codec.DecoderException; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class WebhookDeliveryThreadTest { + @InjectMocks + WebhookDeliveryThread webhookDeliveryThread; + + @Test + public void testIsValidJson() { + Assert.assertFalse(webhookDeliveryThread.isValidJson("text")); + Assert.assertTrue(webhookDeliveryThread.isValidJson("{ \"CloudStack\": \"works!\" }")); + Assert.assertTrue(webhookDeliveryThread.isValidJson("[{ \"CloudStack\": \"works!\" }]")); + } + + @Test + public void testGenerateHMACSignature() { + String data = "CloudStack works!"; + String key = "Pj4pnwSUBZ4wQFXw2zWdVY1k5Ku9bIy70wCNG1DmS8keO7QapCLw2Axtgc2nEPYzfFCfB38ATNLt6caDqU2dSw"; + String result = "HYLWSII5Ap23WeSaykNsIo6mOhmV3d18s5p2cq2ebCA="; + try { + String sign = WebhookDeliveryThread.generateHMACSignature(data, key); + Assert.assertEquals(result, sign); + } catch (InvalidKeyException | NoSuchAlgorithmException | DecoderException e) { + Assert.fail(e.getMessage()); + } + } + + @Test + public void testSetDeliveryTries() { + int tries = 2; + webhookDeliveryThread.setDeliveryTries(tries); + Assert.assertEquals(tries, ReflectionTestUtils.getField(webhookDeliveryThread, "deliveryTries")); + } +} 
diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookEventBusTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookEventBusTest.java new file mode 100644 index 00000000000..ebd3f9e828c --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/WebhookEventBusTest.java @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook; + +import java.util.HashMap; +import java.util.UUID; + +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.framework.events.Event; +import org.apache.cloudstack.framework.events.EventBusException; +import org.apache.cloudstack.framework.events.EventSubscriber; +import org.apache.cloudstack.framework.events.EventTopic; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class WebhookEventBusTest { + + @Mock + WebhookService webhookService; + @InjectMocks + WebhookEventBus eventBus = new WebhookEventBus(); + + @Test + public void testConfigure() { + String name = "name"; + try { + Assert.assertTrue(eventBus.configure(name, new HashMap<>())); + String result = (String)ReflectionTestUtils.getField(eventBus, "_name"); + Assert.assertEquals(name, result); + } catch (ConfigurationException e) { + Assert.fail("Error configuring"); + } + } + + @Test + public void testSetName() { + String name = "name"; + eventBus.setName(name); + String result = (String)ReflectionTestUtils.getField(eventBus, "_name"); + Assert.assertEquals(name, result); + } + + @Test + public void testGetName() { + String name = "name"; + ReflectionTestUtils.setField(eventBus, "_name", name); + Assert.assertEquals(name, eventBus.getName()); + } + + @Test + public void testStart() { + Assert.assertTrue(eventBus.start()); + } + + @Test + public void testStop() { + Assert.assertTrue(eventBus.stop()); + } + + @Test + public void testSubscribe() { + try { + Assert.assertNotNull(eventBus.subscribe(Mockito.mock(EventTopic.class), Mockito.mock(EventSubscriber.class))); + } catch (EventBusException e) { + Assert.fail("Error subscribing"); + } + } + + @Test + public void 
testUnsubscribe() { + try { + eventBus.unsubscribe(Mockito.mock(UUID.class), Mockito.mock(EventSubscriber.class)); + } catch (EventBusException e) { + Assert.fail("Error unsubscribing"); + } + } + + @Test(expected = EventBusException.class) + public void testPublishException() throws EventBusException { + Mockito.doThrow(EventBusException.class).when(webhookService).handleEvent(Mockito.any(Event.class)); + eventBus.publish(Mockito.mock(Event.class)); + } +} diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/CreateWebhookCmdTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/CreateWebhookCmdTest.java new file mode 100644 index 00000000000..7736a42af04 --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/CreateWebhookCmdTest.java @@ -0,0 +1,173 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.UUID; + +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.user.Account; +import com.cloud.user.AccountVO; +import com.cloud.user.User; +import com.cloud.user.UserVO; +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(MockitoJUnitRunner.class) +public class CreateWebhookCmdTest { + + @Mock + WebhookApiService webhookApiService; + + private Object getCommandMethodValue(Object obj, String methodName) { + Object result = null; + try { + Method method = obj.getClass().getMethod(methodName); + result = method.invoke(obj); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + Assert.fail(String.format("Failed to get method %s value", methodName)); + } + return result; + } + + private void runStringMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + CreateWebhookCmd cmd = new CreateWebhookCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + String value = UUID.randomUUID().toString(); + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runLongMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + 
memberName.substring(1); + CreateWebhookCmd cmd = new CreateWebhookCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Long value = 100L; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runBooleanMemberTest(String memberName) { + String methodName = "is" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + CreateWebhookCmd cmd = new CreateWebhookCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertFalse((boolean)getCommandMethodValue(cmd, methodName)); + Boolean value = true; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + @Test + public void testGetName() { + runStringMemberTest("name"); + } + + @Test + public void testGetDescription() { + runStringMemberTest("description"); + } + + @Test + public void testGetPayloadUrl() { + runStringMemberTest("payloadUrl"); + } + + @Test + public void testGetSecretKey() { + runStringMemberTest("secretKey"); + } + + @Test + public void testGetScope() { + runStringMemberTest("scope"); + } + + @Test + public void testGetState() { + runStringMemberTest("state"); + } + + @Test + public void testGetAccount() { + runStringMemberTest("accountName"); + } + + @Test + public void testGetDomainId() { + runLongMemberTest("domainId"); + } + + @Test + public void testGetProjectId() { + runLongMemberTest("projectId"); + } + + @Test + public void testIsSslVerification() { + runBooleanMemberTest("sslVerification"); + } + + @Test + public void testGetEntityOwnerId() { + Account account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, 
account); + CreateWebhookCmd cmd = new CreateWebhookCmd(); + Assert.assertEquals(account.getId(), cmd.getEntityOwnerId()); + } + + @Test(expected = ServerApiException.class) + public void testExecuteNullResponse() { + CreateWebhookCmd cmd = new CreateWebhookCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.createWebhook(cmd)).thenReturn(null); + cmd.execute(); + } + + @Test(expected = ServerApiException.class) + public void testExecuteCRE() { + CreateWebhookCmd cmd = new CreateWebhookCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.createWebhook(cmd)).thenThrow(CloudRuntimeException.class); + cmd.execute(); + } + + @Test + public void testExecute() { + CreateWebhookCmd cmd = new CreateWebhookCmd(); + cmd.webhookApiService = webhookApiService; + WebhookResponse response = new WebhookResponse(); + Mockito.when(webhookApiService.createWebhook(cmd)).thenReturn(response); + cmd.execute(); + Assert.assertEquals(cmd.getCommandName(), response.getResponseName()); + } +} diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookCmdTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookCmdTest.java new file mode 100644 index 00000000000..e9aa61aabb8 --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookCmdTest.java @@ -0,0 +1,106 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.UUID; + +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.user.Account; +import com.cloud.user.AccountVO; +import com.cloud.user.User; +import com.cloud.user.UserVO; +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteWebhookCmdTest { + + @Mock + WebhookApiService webhookApiService; + + private Object getCommandMethodValue(Object obj, String methodName) { + Object result = null; + try { + Method method = obj.getClass().getMethod(methodName); + result = method.invoke(obj); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + Assert.fail(String.format("Failed to get method %s value", methodName)); + } + return result; + } + + private void runLongMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + DeleteWebhookCmd cmd = new DeleteWebhookCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + 
Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Long value = 100L; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + @Test + public void testGetId() { + runLongMemberTest("id"); + } + + @Test + public void testGetEntityOwnerId() { + Account account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + DeleteWebhookCmd cmd = new DeleteWebhookCmd(); + Assert.assertEquals(account.getId(), cmd.getEntityOwnerId()); + } + + @Test(expected = ServerApiException.class) + public void testExecuteFalseResponse() { + DeleteWebhookCmd cmd = new DeleteWebhookCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.deleteWebhook(cmd)).thenReturn(false); + cmd.execute(); + } + + @Test(expected = ServerApiException.class) + public void testExecuteCRE() { + DeleteWebhookCmd cmd = new DeleteWebhookCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.deleteWebhook(cmd)).thenThrow(CloudRuntimeException.class); + cmd.execute(); + } + + @Test + public void testExecute() { + DeleteWebhookCmd cmd = new DeleteWebhookCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.deleteWebhook(cmd)).thenReturn(true); + cmd.execute(); + Assert.assertNotNull(cmd.getResponseObject()); + } +} diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookDeliveryCmdTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookDeliveryCmdTest.java new file mode 100644 index 00000000000..2a090eb7fb1 --- /dev/null +++ 
b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/DeleteWebhookDeliveryCmdTest.java @@ -0,0 +1,108 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.UUID; + +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.user.Account; +import com.cloud.user.AccountVO; +import com.cloud.user.User; +import com.cloud.user.UserVO; +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(MockitoJUnitRunner.class) +public class DeleteWebhookDeliveryCmdTest { + + @Mock + WebhookApiService webhookApiService; + + private Object getCommandMethodValue(Object obj, String methodName) { + Object result = null; + try { + Method method = 
obj.getClass().getMethod(methodName); + result = method.invoke(obj); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + Assert.fail(String.format("Failed to get method %s value", methodName)); + } + return result; + } + + private void runLongMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + DeleteWebhookDeliveryCmd cmd = new DeleteWebhookDeliveryCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Long value = 100L; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + @Test + public void testGetId() { + runLongMemberTest("id"); + } + + @Test + public void testGetWebhookId() { + runLongMemberTest("webhookId"); + } + + @Test + public void testGetManagementServerId() { + runLongMemberTest("managementServerId"); + } + + @Test + public void testGetEntityOwnerId() { + Account account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + DeleteWebhookDeliveryCmd cmd = new DeleteWebhookDeliveryCmd(); + Assert.assertEquals(account.getId(), cmd.getEntityOwnerId()); + } + + @Test(expected = ServerApiException.class) + public void testExecuteCRE() { + DeleteWebhookDeliveryCmd cmd = new DeleteWebhookDeliveryCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.deleteWebhookDelivery(cmd)).thenThrow(CloudRuntimeException.class); + cmd.execute(); + } + + @Test + public void testExecute() { + DeleteWebhookDeliveryCmd cmd = new DeleteWebhookDeliveryCmd(); + cmd.webhookApiService = webhookApiService; + 
Mockito.when(webhookApiService.deleteWebhookDelivery(cmd)).thenReturn(10); + cmd.execute(); + Assert.assertNotNull(cmd.getResponseObject()); + } +} diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ExecuteWebhookDeliveryCmdTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ExecuteWebhookDeliveryCmdTest.java new file mode 100644 index 00000000000..84d51a1e18d --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ExecuteWebhookDeliveryCmdTest.java @@ -0,0 +1,153 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.UUID; + +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.api.response.WebhookDeliveryResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.user.Account; +import com.cloud.user.AccountVO; +import com.cloud.user.User; +import com.cloud.user.UserVO; +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(MockitoJUnitRunner.class) +public class ExecuteWebhookDeliveryCmdTest { + + @Mock + WebhookApiService webhookApiService; + + private Object getCommandMethodValue(Object obj, String methodName) { + Object result = null; + try { + Method method = obj.getClass().getMethod(methodName); + result = method.invoke(obj); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + Assert.fail(String.format("Failed to get method %s value", methodName)); + } + return result; + } + + private void runStringMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + ExecuteWebhookDeliveryCmd cmd = new ExecuteWebhookDeliveryCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + String value = UUID.randomUUID().toString(); + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runLongMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 
1).toUpperCase() + memberName.substring(1); + ExecuteWebhookDeliveryCmd cmd = new ExecuteWebhookDeliveryCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Long value = 100L; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runBooleanMemberTest(String memberName) { + String methodName = "is" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + ExecuteWebhookDeliveryCmd cmd = new ExecuteWebhookDeliveryCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Boolean value = true; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + @Test + public void testGetId() { + runLongMemberTest("id"); + } + + @Test + public void testGetWebhookId() { + runLongMemberTest("webhookId"); + } + + @Test + public void testGetPayloadUrl() { + runStringMemberTest("payloadUrl"); + } + + @Test + public void testGetSecretKey() { + runStringMemberTest("secretKey"); + } + + @Test + public void testIsSslVerification() { + runBooleanMemberTest("sslVerification"); + } + + @Test + public void testGetPayload() { + runStringMemberTest("payload"); + } + + @Test + public void testGetEntityOwnerId() { + Account account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + ExecuteWebhookDeliveryCmd cmd = new ExecuteWebhookDeliveryCmd(); + Assert.assertEquals(account.getId(), cmd.getEntityOwnerId()); + } + + @Test(expected = ServerApiException.class) + public void testExecuteNullResponse() { + ExecuteWebhookDeliveryCmd cmd = new 
ExecuteWebhookDeliveryCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.executeWebhookDelivery(cmd)).thenReturn(null); + cmd.execute(); + } + + @Test(expected = ServerApiException.class) + public void testExecuteCRE() { + ExecuteWebhookDeliveryCmd cmd = new ExecuteWebhookDeliveryCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.executeWebhookDelivery(cmd)).thenThrow(CloudRuntimeException.class); + cmd.execute(); + } + + @Test + public void testExecute() { + ExecuteWebhookDeliveryCmd cmd = new ExecuteWebhookDeliveryCmd(); + cmd.webhookApiService = webhookApiService; + WebhookDeliveryResponse response = new WebhookDeliveryResponse(); + Mockito.when(webhookApiService.executeWebhookDelivery(cmd)).thenReturn(response); + cmd.execute(); + Assert.assertEquals(cmd.getCommandName(), response.getResponseName()); + } +} diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhookDeliveriesCmdTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhookDeliveriesCmdTest.java new file mode 100644 index 00000000000..6359b042c40 --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhookDeliveriesCmdTest.java @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.UUID; + +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.api.response.WebhookDeliveryResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.user.Account; +import com.cloud.user.AccountVO; +import com.cloud.user.User; +import com.cloud.user.UserVO; + +@RunWith(MockitoJUnitRunner.class) +public class ListWebhookDeliveriesCmdTest { + + @Mock + WebhookApiService webhookApiService; + + private Object getCommandMethodValue(Object obj, String methodName) { + Object result = null; + try { + Method method = obj.getClass().getMethod(methodName); + result = method.invoke(obj); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + Assert.fail(String.format("Failed to get method %s value", methodName)); + } + return result; + } + + private void runLongMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + ListWebhookDeliveriesCmd 
cmd = new ListWebhookDeliveriesCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Long value = 100L; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runStringMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + ListWebhookDeliveriesCmd cmd = new ListWebhookDeliveriesCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + String value = UUID.randomUUID().toString(); + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runDateMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + ListWebhookDeliveriesCmd cmd = new ListWebhookDeliveriesCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Date value = new Date(); + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + @Test + public void testGetId() { + runLongMemberTest("id"); + } + + @Test + public void testGetWebhookId() { + runLongMemberTest("webhookId"); + } + + @Test + public void testGetManagementServerId() { + runLongMemberTest("managementServerId"); + } + + @Test + public void testStartDate() { + runDateMemberTest("startDate"); + } + + @Test + public void testEndDate() { + runDateMemberTest("endDate"); + } + + @Test + public void testEventType() { + runStringMemberTest("eventType"); + } + + @Test + public void testGetEntityOwnerId() { + Account account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", 
"password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + ListWebhookDeliveriesCmd cmd = new ListWebhookDeliveriesCmd(); + Assert.assertEquals(account.getId(), cmd.getEntityOwnerId()); + } + + @Test + public void testExecute() { + ListWebhookDeliveriesCmd cmd = new ListWebhookDeliveriesCmd(); + cmd.webhookApiService = webhookApiService; + List responseList = new ArrayList<>(); + ListResponse listResponse = new ListResponse<>(); + listResponse.setResponses(responseList); + Mockito.when(webhookApiService.listWebhookDeliveries(cmd)).thenReturn(listResponse); + cmd.execute(); + Assert.assertNotNull(cmd.getResponseObject()); + } +} diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhooksCmdTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhooksCmdTest.java new file mode 100644 index 00000000000..1cbf9d1e836 --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/ListWebhooksCmdTest.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +@RunWith(MockitoJUnitRunner.class) +public class ListWebhooksCmdTest { + + @Mock + WebhookApiService webhookApiService; + + private Object getCommandMethodValue(Object obj, String methodName) { + Object result = null; + try { + Method method = obj.getClass().getMethod(methodName); + result = method.invoke(obj); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + Assert.fail(String.format("Failed to get method %s value", methodName)); + } + return result; + } + + private void runStringMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + ListWebhooksCmd cmd = new ListWebhooksCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + String value = UUID.randomUUID().toString(); + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runLongMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + ListWebhooksCmd cmd = new ListWebhooksCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Long value = 
100L; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + @Test + public void testGetId() { + runLongMemberTest("id"); + } + + @Test + public void testGetName() { + runStringMemberTest("name"); + } + + @Test + public void testGetState() { + runStringMemberTest("state"); + } + + @Test + public void testGetScope() { + runStringMemberTest("scope"); + } + + @Test + public void testExecute() { + ListWebhooksCmd cmd = new ListWebhooksCmd(); + cmd.webhookApiService = webhookApiService; + List responseList = new ArrayList<>(); + ListResponse listResponse = new ListResponse<>(); + listResponse.setResponses(responseList); + Mockito.when(webhookApiService.listWebhooks(cmd)).thenReturn(listResponse); + cmd.execute(); + Assert.assertNotNull(cmd.getResponseObject()); + } +} diff --git a/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/UpdateWebhookCmdTest.java b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/UpdateWebhookCmdTest.java new file mode 100644 index 00000000000..719e63cadf2 --- /dev/null +++ b/plugins/event-bus/webhook/src/test/java/org/apache/cloudstack/mom/webhook/api/command/user/UpdateWebhookCmdTest.java @@ -0,0 +1,163 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.mom.webhook.api.command.user; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.UUID; + +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.mom.webhook.WebhookApiService; +import org.apache.cloudstack.mom.webhook.api.response.WebhookResponse; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.user.Account; +import com.cloud.user.AccountVO; +import com.cloud.user.User; +import com.cloud.user.UserVO; +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(MockitoJUnitRunner.class) +public class UpdateWebhookCmdTest { + + @Mock + WebhookApiService webhookApiService; + + private Object getCommandMethodValue(Object obj, String methodName) { + Object result = null; + try { + Method method = obj.getClass().getMethod(methodName); + result = method.invoke(obj); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + Assert.fail(String.format("Failed to get method %s value", methodName)); + } + return result; + } + + private void runStringMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + UpdateWebhookCmd cmd = new UpdateWebhookCmd(); + 
ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + String value = UUID.randomUUID().toString(); + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runLongMemberTest(String memberName) { + String methodName = "get" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + UpdateWebhookCmd cmd = new UpdateWebhookCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Long value = 100L; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + private void runBooleanMemberTest(String memberName) { + String methodName = "is" + memberName.substring(0, 1).toUpperCase() + memberName.substring(1); + UpdateWebhookCmd cmd = new UpdateWebhookCmd(); + ReflectionTestUtils.setField(cmd, memberName, null); + Assert.assertNull(getCommandMethodValue(cmd, methodName)); + Boolean value = true; + ReflectionTestUtils.setField(cmd, memberName, value); + Assert.assertEquals(value, getCommandMethodValue(cmd, methodName)); + } + + @Test + public void testGetId() { + runLongMemberTest("id"); + } + + @Test + public void testGetName() { + runStringMemberTest("name"); + } + + @Test + public void testGetDescription() { + runStringMemberTest("description"); + } + + @Test + public void testGetPayloadUrl() { + runStringMemberTest("payloadUrl"); + } + + @Test + public void testGetSecretKey() { + runStringMemberTest("secretKey"); + } + + @Test + public void testGetScope() { + runStringMemberTest("scope"); + } + + @Test + public void testGetState() { + runStringMemberTest("state"); + } + + @Test + public void testIsSslVerification() { + runBooleanMemberTest("sslVerification"); + } + + @Test + public void testGetEntityOwnerId() { + Account account = new AccountVO("testaccount", 1L, 
"networkdomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + UpdateWebhookCmd cmd = new UpdateWebhookCmd(); + Assert.assertEquals(account.getId(), cmd.getEntityOwnerId()); + } + + @Test(expected = ServerApiException.class) + public void testExecuteNullResponse() { + UpdateWebhookCmd cmd = new UpdateWebhookCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.updateWebhook(cmd)).thenReturn(null); + cmd.execute(); + } + + @Test(expected = ServerApiException.class) + public void testExecuteCRE() { + UpdateWebhookCmd cmd = new UpdateWebhookCmd(); + cmd.webhookApiService = webhookApiService; + Mockito.when(webhookApiService.updateWebhook(cmd)).thenThrow(CloudRuntimeException.class); + cmd.execute(); + } + + @Test + public void testExecute() { + UpdateWebhookCmd cmd = new UpdateWebhookCmd(); + cmd.webhookApiService = webhookApiService; + WebhookResponse response = new WebhookResponse(); + Mockito.when(webhookApiService.updateWebhook(cmd)).thenReturn(response); + cmd.execute(); + Assert.assertEquals(cmd.getCommandName(), response.getResponseName()); + } +} diff --git a/plugins/ha-planners/skip-heurestics/src/main/resources/META-INF/cloudstack/skip-heurestics/spring-skip-heurestics-context.xml b/plugins/ha-planners/skip-heurestics/src/main/resources/META-INF/cloudstack/skip-heurestics/spring-skip-heurestics-context.xml index 3cb5a55bb15..f28d67c5bc1 100644 --- a/plugins/ha-planners/skip-heurestics/src/main/resources/META-INF/cloudstack/skip-heurestics/spring-skip-heurestics-context.xml +++ b/plugins/ha-planners/skip-heurestics/src/main/resources/META-INF/cloudstack/skip-heurestics/spring-skip-heurestics-context.xml @@ -1,12 +1,12 @@ - hostsCopy = null; - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); if (type == Host.Type.Storage) { 
return suitableHosts; @@ -107,7 +106,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { } if (hosts != null) { // retain all computing hosts, regardless of whether they support routing...it's random after all - hostsCopy = new ArrayList(hosts); + hostsCopy = new ArrayList<>(hosts); if (ObjectUtils.anyNotNull(offeringHostTag, templateTag)) { hostsCopy.retainAll(listHostsByTags(type, dcId, podId, clusterId, offeringHostTag, templateTag)); } else { @@ -124,14 +123,15 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { hostsCopy = ListUtils.union(hostsCopy, _hostDao.findHostsWithTagRuleThatMatchComputeOferringTags(offeringHostTag)); if (hostsCopy.isEmpty()) { - logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTag)); - throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile)); + logger.info("No suitable host found for VM [{}] in {}.", vmProfile, hostTag); + return null; } - logger.debug("Random Allocator found " + hostsCopy.size() + " hosts"); - if (hostsCopy.size() == 0) { + logger.debug("Random Allocator found {} hosts", hostsCopy.size()); + if (hostsCopy.isEmpty()) { return suitableHosts; } + Collections.shuffle(hostsCopy); for (Host host : hostsCopy) { if (suitableHosts.size() == returnUpTo) { @@ -174,7 +174,7 @@ public class RandomAllocator extends AdapterBase implements HostAllocator { if (logger.isDebugEnabled()) { logger.debug("Random Allocator found 0 hosts as given host list is empty"); } - return new ArrayList(); + return new ArrayList<>(); } return findSuitableHosts(vmProfile, plan, type, avoid, hosts, returnUpTo, considerReservedCapacity); } diff --git a/plugins/host-allocators/random/src/main/resources/META-INF/cloudstack/host-allocator-random/spring-host-allocator-random-context.xml b/plugins/host-allocators/random/src/main/resources/META-INF/cloudstack/host-allocator-random/spring-host-allocator-random-context.xml 
index 979ccc43e08..d84eaafaa5a 100644 --- a/plugins/host-allocators/random/src/main/resources/META-INF/cloudstack/host-allocator-random/spring-host-allocator-random-context.xml +++ b/plugins/host-allocators/random/src/main/resources/META-INF/cloudstack/host-allocator-random/spring-host-allocator-random-context.xml @@ -30,5 +30,5 @@ - + diff --git a/plugins/hypervisors/hyperv/conf/log4j-cloud.xml.in b/plugins/hypervisors/hyperv/conf/log4j-cloud.xml.in index 0292dffb3a9..104e52233a9 100644 --- a/plugins/hypervisors/hyperv/conf/log4j-cloud.xml.in +++ b/plugins/hypervisors/hyperv/conf/log4j-cloud.xml.in @@ -30,7 +30,7 @@ under the License. - + @@ -39,7 +39,7 @@ under the License. - + diff --git a/plugins/hypervisors/hyperv/pom.xml b/plugins/hypervisors/hyperv/pom.xml index b24c4c8a847..56b2b6d1503 100644 --- a/plugins/hypervisors/hyperv/pom.xml +++ b/plugins/hypervisors/hyperv/pom.xml @@ -55,7 +55,7 @@ org.codehaus.mojo exec-maven-plugin - 1.2.1 + ${cs.exec-maven-plugin.version} java com.cloud.agent.AgentShell @@ -137,6 +137,7 @@ org.codehaus.mojo exec-maven-plugin + ${cs.exec-maven-plugin.version} compile diff --git a/plugins/hypervisors/hyperv/src/main/resources/META-INF/cloudstack/hyperv-compute/spring-hyperv-compute-context.xml b/plugins/hypervisors/hyperv/src/main/resources/META-INF/cloudstack/hyperv-compute/spring-hyperv-compute-context.xml index ffb6440d3cf..5549a6b13d7 100644 --- a/plugins/hypervisors/hyperv/src/main/resources/META-INF/cloudstack/hyperv-compute/spring-hyperv-compute-context.xml +++ b/plugins/hypervisors/hyperv/src/main/resources/META-INF/cloudstack/hyperv-compute/spring-hyperv-compute-context.xml @@ -1,12 +1,12 @@ - ${project.version} compile - - org.apache.cloudstack - cloud-plugin-network-tungsten - ${project.version} - diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 
b5ec716e805..abd6e24a532 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.hypervisor.kvm.resource; +import static com.cloud.host.Host.HOST_INSTANCE_CONVERSION; import static com.cloud.host.Host.HOST_VOLUME_ENCRYPTION; import java.io.BufferedReader; @@ -43,6 +44,8 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.naming.ConfigurationException; import javax.xml.parsers.DocumentBuilder; @@ -75,6 +78,8 @@ import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang.math.NumberUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.apache.xerces.impl.xpath.regex.Match; @@ -306,6 +311,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv public static final String TUNGSTEN_PATH = "scripts/vm/network/tungsten"; + public static final String INSTANCE_CONVERSION_SUPPORTED_CHECK_CMD = "virt-v2v --version"; + // virt-v2v --version => sample output: virt-v2v 1.42.0rhel=8,release=22.module+el8.10.0+1590+a67ab969 + public static final String OVF_EXPORT_SUPPORTED_CHECK_CMD = "ovftool --version"; + // ovftool --version => sample output: VMware ovftool 4.6.0 (build-21452615) + public static final String OVF_EXPORT_TOOl_GET_VERSION_CMD = "ovftool --version | awk '{print $3}'"; + + public static final String WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD = "rpm -qa | grep -i virtio-win"; + 
public static final String UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD = "dpkg -l virtio-win"; + public static final String UBUNTU_NBDKIT_PKG_CHECK_CMD = "dpkg -l nbdkit"; + private String modifyVlanPath; private String versionStringPath; private String patchScriptPath; @@ -315,6 +330,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv private String createTmplPath; private String heartBeatPath; private String vmActivityCheckPath; + private String nasBackupPath; private String securityGroupPath; private String ovsPvlanDhcpHostPath; private String ovsPvlanVmPath; @@ -434,7 +450,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv protected WatchDogModel watchDogModel = WatchDogModel.I6300ESB; private final Map pifs = new HashMap(); - private final Map vmStats = new ConcurrentHashMap(); + private final Map vmStats = new ConcurrentHashMap<>(); private final Map vmDiskStats = new ConcurrentHashMap<>(); @@ -703,6 +719,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return vmActivityCheckPath; } + public String getNasBackupPath() { + return nasBackupPath; + } + public String getOvsPvlanDhcpHostPath() { return ovsPvlanDhcpHostPath; } @@ -973,6 +993,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv throw new ConfigurationException("Unable to find kvmvmactivity.sh"); } + nasBackupPath = Script.findScript(kvmScriptsDir, "nasbackup.sh"); + if (nasBackupPath == null) { + throw new ConfigurationException("Unable to find nasbackup.sh"); + } + createTmplPath = Script.findScript(storageScriptsDir, "createtmplt.sh"); if (createTmplPath == null) { throw new ConfigurationException("Unable to find the createtmplt.sh"); @@ -2325,7 +2350,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv final PowerState s = convertToPowerState(vms.getInfo().state); return s; } catch (final LibvirtException e) { - 
LOGGER.warn("Can't get vm state " + vmName + e.getMessage() + "retry:" + retry); + LOGGER.error("Could not get state for VM [{}] (retry={}) due to:", vmName, retry, e); } finally { try { if (vms != null) { @@ -2827,6 +2852,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv GuestDef guest = new GuestDef(); configureGuestAndVMHypervisorType(vmTO, vm, guest); + guest.setManufacturer(vmTO.getMetadataManufacturer()); + guest.setProduct(vmTO.getMetadataProductName()); guest.setGuestArch(guestCpuArch != null ? guestCpuArch : vmTO.getArch()); guest.setMachineType(isGuestAarch64() ? VIRT : PC); guest.setBootType(GuestDef.BootType.BIOS); @@ -3632,7 +3659,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (dpdkSupport) { capabilities += ",dpdk"; } - final StartupRoutingCommand cmd = new StartupRoutingCommand(info.getAllocatableCpus(), info.getCpuSpeed(), info.getTotalMemory(), info.getReservedMemory(), capabilities, hypervisorType, RouterPrivateIpStrategy.HostLocal); @@ -3647,10 +3673,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv cmd.setIqn(getIqn()); cmd.getHostDetails().put(HOST_VOLUME_ENCRYPTION, String.valueOf(hostSupportsVolumeEncryption())); cmd.setHostTags(getHostTags()); + cmd.getHostDetails().put(HOST_INSTANCE_CONVERSION, String.valueOf(hostSupportsInstanceConversion())); HealthCheckResult healthCheckResult = getHostHealthCheckResult(); if (healthCheckResult != HealthCheckResult.IGNORE) { cmd.setHostHealthCheckResult(healthCheckResult == HealthCheckResult.SUCCESS); } + if (StringUtils.isNotBlank(info.getCpuArch())) { + cmd.setCpuArch(info.getCpuArch()); + } if (cmd.getHostDetails().containsKey("Host.OS")) { hostDistro = cmd.getHostDetails().get("Host.OS"); @@ -3797,29 +3827,29 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } public List getAllVmNames(final Connect conn) { - final ArrayList la = new ArrayList(); + final 
ArrayList domainNames = new ArrayList(); try { final String names[] = conn.listDefinedDomains(); for (int i = 0; i < names.length; i++) { - la.add(names[i]); + domainNames.add(names[i]); } } catch (final LibvirtException e) { - LOGGER.warn("Failed to list Defined domains", e); + logger.warn("Failed to list defined domains", e); } int[] ids = null; try { ids = conn.listDomains(); } catch (final LibvirtException e) { - LOGGER.warn("Failed to list domains", e); - return la; + logger.warn("Failed to list domains", e); + return domainNames; } Domain dm = null; for (int i = 0; i < ids.length; i++) { try { dm = conn.domainLookupByID(ids[i]); - la.add(dm.getName()); + domainNames.add(dm.getName()); } catch (final LibvirtException e) { LOGGER.warn("Unable to get vms", e); } finally { @@ -3833,7 +3863,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - return la; + return domainNames; } private HashMap getHostVmStateReport() { @@ -4388,122 +4418,34 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return null; } - private class VmStats { - long usedTime; - long tx; - long rx; - long ioRead; - long ioWrote; - long bytesRead; - long bytesWrote; - Calendar timestamp; + private String vmToString(Domain dm) throws LibvirtException { + return String.format("{\"name\":\"%s\",\"uuid\":\"%s\"}", dm.getName(), dm.getUUIDString()); } + /** + * Returns metrics for the period since this function was last called for the specified VM. + * @param conn the Libvirt connection. + * @param vmName name of the VM. + * @return metrics for the period since last time this function was called for the VM. 
+ * @throws LibvirtException + */ public VmStatsEntry getVmStat(final Connect conn, final String vmName) throws LibvirtException { Domain dm = null; try { + LOGGER.debug("Trying to get VM with name [{}].", vmName); dm = getDomain(conn, vmName); if (dm == null) { + LOGGER.warn("Could not get VM with name [{}].", vmName); return null; } - DomainInfo info = dm.getInfo(); - final VmStatsEntry stats = new VmStatsEntry(); - stats.setNumCPUs(info.nrVirtCpu); - stats.setEntityType("vm"); + LibvirtExtendedVmStatsEntry newStats = getVmCurrentStats(dm); + LibvirtExtendedVmStatsEntry oldStats = vmStats.get(vmName); - stats.setMemoryKBs(info.maxMem); - stats.setTargetMemoryKBs(info.memory); - stats.setIntFreeMemoryKBs(getMemoryFreeInKBs(dm)); + VmStatsEntry metrics = calculateVmMetrics(dm, oldStats, newStats); + vmStats.put(vmName, newStats); - /* get cpu utilization */ - VmStats oldStats = null; - - final Calendar now = Calendar.getInstance(); - - oldStats = vmStats.get(vmName); - - long elapsedTime = 0; - if (oldStats != null) { - elapsedTime = now.getTimeInMillis() - oldStats.timestamp.getTimeInMillis(); - double utilization = (info.cpuTime - oldStats.usedTime) / ((double)elapsedTime * 1000000); - - utilization = utilization / info.nrVirtCpu; - if (utilization > 0) { - stats.setCPUUtilization(utilization * 100); - } - } - - /* get network stats */ - - final List vifs = getInterfaces(conn, vmName); - long rx = 0; - long tx = 0; - for (final InterfaceDef vif : vifs) { - final DomainInterfaceStats ifStats = dm.interfaceStats(vif.getDevName()); - rx += ifStats.rx_bytes; - tx += ifStats.tx_bytes; - } - - if (oldStats != null) { - final double deltarx = rx - oldStats.rx; - if (deltarx > 0) { - stats.setNetworkReadKBs(deltarx / 1024); - } - final double deltatx = tx - oldStats.tx; - if (deltatx > 0) { - stats.setNetworkWriteKBs(deltatx / 1024); - } - } - - /* get disk stats */ - final List disks = getDisks(conn, vmName); - long io_rd = 0; - long io_wr = 0; - long bytes_rd = 0; - 
long bytes_wr = 0; - for (final DiskDef disk : disks) { - if (disk.getDeviceType() == DeviceType.CDROM || disk.getDeviceType() == DeviceType.FLOPPY) { - continue; - } - final DomainBlockStats blockStats = dm.blockStats(disk.getDiskLabel()); - io_rd += blockStats.rd_req; - io_wr += blockStats.wr_req; - bytes_rd += blockStats.rd_bytes; - bytes_wr += blockStats.wr_bytes; - } - - if (oldStats != null) { - final long deltaiord = io_rd - oldStats.ioRead; - if (deltaiord > 0) { - stats.setDiskReadIOs(deltaiord); - } - final long deltaiowr = io_wr - oldStats.ioWrote; - if (deltaiowr > 0) { - stats.setDiskWriteIOs(deltaiowr); - } - final double deltabytesrd = bytes_rd - oldStats.bytesRead; - if (deltabytesrd > 0) { - stats.setDiskReadKBs(deltabytesrd / 1024); - } - final double deltabyteswr = bytes_wr - oldStats.bytesWrote; - if (deltabyteswr > 0) { - stats.setDiskWriteKBs(deltabyteswr / 1024); - } - } - - /* save to Hashmap */ - final VmStats newStat = new VmStats(); - newStat.usedTime = info.cpuTime; - newStat.rx = rx; - newStat.tx = tx; - newStat.ioRead = io_rd; - newStat.ioWrote = io_wr; - newStat.bytesRead = bytes_rd; - newStat.bytesWrote = bytes_wr; - newStat.timestamp = now; - vmStats.put(vmName, newStat); - return stats; + return metrics; } finally { if (dm != null) { dm.free(); @@ -4511,6 +4453,156 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } + /** + * Returns a VM's current statistics. + * @param dm domain of the VM. + * @return current statistics of the VM. 
+ * @throws LibvirtException + */ + protected LibvirtExtendedVmStatsEntry getVmCurrentStats(final Domain dm) throws LibvirtException { + final LibvirtExtendedVmStatsEntry stats = new LibvirtExtendedVmStatsEntry(); + + getVmCurrentCpuStats(dm, stats); + getVmCurrentNetworkStats(dm, stats); + getVmCurrentDiskStats(dm, stats); + + LOGGER.debug("Retrieved statistics for VM [{}]: [{}].", vmToString(dm), stats); + stats.setTimestamp(Calendar.getInstance()); + return stats; + } + + /** + * Passes a VM's current CPU statistics into the provided LibvirtExtendedVmStatsEntry. + * @param dm domain of the VM. + * @param stats LibvirtExtendedVmStatsEntry that will receive the current CPU statistics. + * @throws LibvirtException + */ + protected void getVmCurrentCpuStats(final Domain dm, final LibvirtExtendedVmStatsEntry stats) throws LibvirtException { + LOGGER.trace("Getting CPU stats for VM [{}].", vmToString(dm)); + stats.setCpuTime(dm.getInfo().cpuTime); + } + + /** + * Passes a VM's current network statistics into the provided LibvirtExtendedVmStatsEntry. + * @param dm domain of the VM. + * @param stats LibvirtExtendedVmStatsEntry that will receive the current network statistics. + * @throws LibvirtException + */ + protected void getVmCurrentNetworkStats(final Domain dm, final LibvirtExtendedVmStatsEntry stats) throws LibvirtException { + final String vmAsString = vmToString(dm); + LOGGER.trace("Getting network stats for VM [{}].", vmAsString); + final List vifs = getInterfaces(dm.getConnect(), dm.getName()); + LOGGER.debug("Found [{}] network interface(s) for VM [{}].", vifs.size(), vmAsString); + double rx = 0; + double tx = 0; + for (final InterfaceDef vif : vifs) { + final DomainInterfaceStats ifStats = dm.interfaceStats(vif.getDevName()); + rx += ifStats.rx_bytes; + tx += ifStats.tx_bytes; + } + stats.setNetworkReadKBs(rx / 1024); + stats.setNetworkWriteKBs(tx / 1024); + } + + /** + * Passes a VM's current disk statistics into the provided LibvirtExtendedVmStatsEntry. 
+ * @param dm domain of the VM. + * @param stats LibvirtExtendedVmStatsEntry that will receive the current disk statistics. + * @throws LibvirtException + */ + protected void getVmCurrentDiskStats(final Domain dm, final LibvirtExtendedVmStatsEntry stats) throws LibvirtException { + final String vmAsString = vmToString(dm); + LOGGER.trace("Getting disk stats for VM [{}].", vmAsString); + final List disks = getDisks(dm.getConnect(), dm.getName()); + LOGGER.debug("Found [{}] disk(s) for VM [{}].", disks.size(), vmAsString); + long io_rd = 0; + long io_wr = 0; + double bytes_rd = 0; + double bytes_wr = 0; + for (final DiskDef disk : disks) { + if (disk.getDeviceType() == DeviceType.CDROM || disk.getDeviceType() == DeviceType.FLOPPY) { + LOGGER.debug("Ignoring disk [{}] in VM [{}]'s stats since its deviceType is [{}].", disk.toString().replace("\n", ""), vmAsString, disk.getDeviceType()); + continue; + } + final DomainBlockStats blockStats = dm.blockStats(disk.getDiskLabel()); + io_rd += blockStats.rd_req; + io_wr += blockStats.wr_req; + bytes_rd += blockStats.rd_bytes; + bytes_wr += blockStats.wr_bytes; + } + stats.setDiskReadIOs(io_rd); + stats.setDiskWriteIOs(io_wr); + stats.setDiskReadKBs(bytes_rd / 1024); + stats.setDiskWriteKBs(bytes_wr / 1024); + } + + /** + * Calculates a VM's metrics for the period between the two statistics given as parameters. + * @param dm domain of the VM. + * @param oldStats old statistics. If null, the CPU, network and disk utilization won't be calculated. + * @param newStats new statistics. + * @return metrics for the period between the two statistics. 
+ * @throws LibvirtException + */ + protected VmStatsEntry calculateVmMetrics(final Domain dm, final LibvirtExtendedVmStatsEntry oldStats, final LibvirtExtendedVmStatsEntry newStats) throws LibvirtException { + final VmStatsEntry metrics = new VmStatsEntry(); + final DomainInfo info = dm.getInfo(); + final String vmAsString = vmToString(dm); + + metrics.setEntityType("vm"); + LOGGER.trace("Writing VM [{}]'s CPU and memory information into the metrics.", vmAsString); + metrics.setNumCPUs(info.nrVirtCpu); + metrics.setMemoryKBs(info.maxMem); + metrics.setTargetMemoryKBs(info.memory); + LOGGER.trace("Trying to get free memory for VM [{}].", vmAsString); + metrics.setIntFreeMemoryKBs(getMemoryFreeInKBs(dm)); + + if (oldStats != null) { + LOGGER.debug("Old stats exist for VM [{}]; therefore, the utilization will be calculated.", vmAsString); + + LOGGER.trace("Calculating CPU utilization for VM [{}].", vmAsString); + final Calendar now = Calendar.getInstance(); + long elapsedTime = now.getTimeInMillis() - oldStats.getTimestamp().getTimeInMillis(); + double utilization = (info.cpuTime - oldStats.getCpuTime()) / ((double) elapsedTime * 1000000 * info.nrVirtCpu); + if (utilization > 0) { + metrics.setCPUUtilization(utilization * 100); + } + + LOGGER.trace("Calculating network utilization for VM [{}].", vmAsString); + final double deltarx = newStats.getNetworkReadKBs() - oldStats.getNetworkReadKBs(); + if (deltarx > 0) { + metrics.setNetworkReadKBs(deltarx); + } + final double deltatx = newStats.getNetworkWriteKBs() - oldStats.getNetworkWriteKBs(); + if (deltatx > 0) { + metrics.setNetworkWriteKBs(deltatx); + } + + LOGGER.trace("Calculating disk utilization for VM [{}].", vmAsString); + final double deltaiord = newStats.getDiskReadIOs() - oldStats.getDiskReadIOs(); + if (deltaiord > 0) { + metrics.setDiskReadIOs(deltaiord); + } + final double deltaiowr = newStats.getDiskWriteIOs() - oldStats.getDiskWriteIOs(); + if (deltaiowr > 0) { + metrics.setDiskWriteIOs(deltaiowr); + } 
+ final double deltabytesrd = newStats.getDiskReadKBs() - oldStats.getDiskReadKBs(); + if (deltabytesrd > 0) { + metrics.setDiskReadKBs(deltabytesrd); + } + final double deltabyteswr = newStats.getDiskWriteKBs() - oldStats.getDiskWriteKBs(); + if (deltabyteswr > 0) { + metrics.setDiskWriteKBs(deltabyteswr); + } + } + + String metricsAsString = new ReflectionToStringBuilder(metrics, ToStringStyle.JSON_STYLE).setExcludeFieldNames("vmId", "vmUuid").toString(); + LOGGER.debug("Calculated metrics for VM [{}]: [{}].", vmAsString, metricsAsString); + + return metrics; + } + /** * This method retrieves the memory statistics from the domain given as parameters. * If no memory statistic is found, it will return {@link NumberUtils#LONG_MINUS_ONE} as the value of free memory in the domain. @@ -4520,10 +4612,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv */ protected long getMemoryFreeInKBs(Domain dm) throws LibvirtException { MemoryStatistic[] memoryStats = dm.memoryStats(NUMMEMSTATS); - - if(LOGGER.isTraceEnabled()){ - LOGGER.trace(String.format("Retrieved memory statistics (information about tags can be found on the libvirt documentation):", ArrayUtils.toString(memoryStats))); - } + LOGGER.trace("Retrieved memory statistics (information about tags can be found on the libvirt documentation): {}.", + () -> Stream.of(memoryStats).map(stat -> stat.toString().trim().replace("\n", ",")).collect(Collectors.joining("},{", "[{", "}]"))); long freeMemory = NumberUtils.LONG_MINUS_ONE; @@ -5163,6 +5253,48 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return false; } + public boolean hostSupportsInstanceConversion() { + int exitValue = Script.runSimpleBashScriptForExitValue(INSTANCE_CONVERSION_SUPPORTED_CHECK_CMD); + if (isUbuntuHost() && exitValue == 0) { + exitValue = Script.runSimpleBashScriptForExitValue(UBUNTU_NBDKIT_PKG_CHECK_CMD); + } + return exitValue == 0; + } + + public boolean 
hostSupportsWindowsGuestConversion() { + if (isUbuntuHost()) { + int exitValue = Script.runSimpleBashScriptForExitValue(UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD); + return exitValue == 0; + } + int exitValue = Script.runSimpleBashScriptForExitValue(WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD); + return exitValue == 0; + } + + public boolean hostSupportsOvfExport() { + int exitValue = Script.runSimpleBashScriptForExitValue(OVF_EXPORT_SUPPORTED_CHECK_CMD); + return exitValue == 0; + } + + public boolean ovfExportToolSupportsParallelThreads() { + String ovfExportToolVersion = Script.runSimpleBashScript(OVF_EXPORT_TOOl_GET_VERSION_CMD); + if (StringUtils.isBlank(ovfExportToolVersion)) { + return false; + } + String[] ovfExportToolVersions = ovfExportToolVersion.trim().split("\\."); + if (ovfExportToolVersions.length > 1) { + try { + int majorVersion = Integer.parseInt(ovfExportToolVersions[0]); + int minorVersion = Integer.parseInt(ovfExportToolVersions[1]); + //ovftool version >= 4.4 supports parallel threads + if (majorVersion > 4 || (majorVersion == 4 && minorVersion >= 4)) { + return true; + } + } catch (NumberFormatException ignored) { + } + } + return false; + } + protected void setCpuTopology(CpuModeDef cmd, int vCpusInDef, Map details) { if (!enableManuallySettingCpuTopologyOnKvmVm) { LOGGER.debug(String.format("Skipping manually setting CPU topology on VM's XML due to it is disabled in agent.properties {\"property\": \"%s\", \"value\": %s}.", @@ -5379,20 +5511,31 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv /* Scp volume from remote host to local directory */ - public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath) { + public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath, int timeoutInSecs) { + String outputFile = UUID.randomUUID().toString(); try { - String outputFile = 
UUID.randomUUID().toString(); StringBuilder command = new StringBuilder("qemu-img convert -O qcow2 "); command.append(remoteFile); - command.append(" "+tmpPath); + command.append(" " + tmpPath); command.append(outputFile); - logger.debug("Converting remoteFile: "+remoteFile); - SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString()); - logger.debug("Copying remoteFile to: "+localDir); - SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath+outputFile); - logger.debug("Successfully copyied remoteFile to: "+localDir+"/"+outputFile); + logger.debug(String.format("Converting remote disk file: %s, output file: %s%s (timeout: %d secs)", remoteFile, tmpPath, outputFile, timeoutInSecs)); + SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString(), timeoutInSecs * 1000); + logger.debug("Copying converted remote disk file " + outputFile + " to: " + localDir); + SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath + outputFile); + logger.debug("Successfully copied converted remote disk file to: " + localDir + "/" + outputFile); return outputFile; } catch (Exception e) { + try { + String deleteRemoteConvertedFileCmd = String.format("rm -f %s%s", tmpPath, outputFile); + SshHelper.sshExecute(srcIp, 22, username, null, password, deleteRemoteConvertedFileCmd); + } catch (Exception ignored) { + } + + try { + FileUtils.deleteQuietly(new File(localDir + "/" + outputFile)); + } catch (Exception ignored) { + } + throw new RuntimeException(e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtExtendedVmStatsEntry.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtExtendedVmStatsEntry.java new file mode 100644 index 00000000000..8e0e4a2ae27 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtExtendedVmStatsEntry.java @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation 
(ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.hypervisor.kvm.resource; + +import com.cloud.agent.api.VmStatsEntry; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + +import java.util.Calendar; + +public class LibvirtExtendedVmStatsEntry extends VmStatsEntry { + private long cpuTime; + private Calendar timestamp; + + public LibvirtExtendedVmStatsEntry() { + } + + public long getCpuTime() { + return cpuTime; + } + + public void setCpuTime(long cpuTime) { + this.cpuTime = cpuTime; + } + + public Calendar getTimestamp() { + return timestamp; + } + + public void setTimestamp(Calendar timestamp) { + this.timestamp = timestamp; + } + + @Override + public String toString() { + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "cpuTime", "networkWriteKBs", "networkReadKBs", "diskReadIOs", "diskWriteIOs", "diskReadKBs", "diskWriteKBs"); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java index ff44c8df2fa..09ee45d5908 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java @@ -16,6 +16,12 @@ // under the License. package com.cloud.hypervisor.kvm.resource; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.commons.collections.CollectionUtils; + public class LibvirtStoragePoolDef { public enum PoolType { ISCSI("iscsi"), NETFS("netfs"), loggerICAL("logical"), DIR("dir"), RBD("rbd"), GLUSTERFS("glusterfs"), POWERFLEX("powerflex"); @@ -55,6 +61,7 @@ public class LibvirtStoragePoolDef { private String _authUsername; private AuthenticationType _authType; private String _secretUuid; + private Set _nfsMountOpts = new HashSet<>(); public LibvirtStoragePoolDef(PoolType type, String poolName, String uuid, String host, int port, String dir, String targetPath) { _poolType = type; @@ -75,6 +82,15 @@ public class LibvirtStoragePoolDef { _targetPath = targetPath; } + public LibvirtStoragePoolDef(PoolType type, String poolName, String uuid, String host, String dir, String targetPath, List nfsMountOpts) { + this(type, poolName, uuid, host, dir, targetPath); + if (CollectionUtils.isNotEmpty(nfsMountOpts)) { + for (String nfsMountOpt : nfsMountOpts) { + this._nfsMountOpts.add(nfsMountOpt); + } + } + } + public LibvirtStoragePoolDef(PoolType type, String poolName, String uuid, String sourceHost, int sourcePort, String dir, String authUsername, AuthenticationType authType, String secretUuid) { _poolType = type; @@ -124,69 +140,98 @@ public class LibvirtStoragePoolDef { return _authType; } + public Set getNfsMountOpts() { + return _nfsMountOpts; + } + @Override public String toString() { StringBuilder storagePoolBuilder = new StringBuilder(); - if (_poolType == PoolType.GLUSTERFS) { - /* libvirt mounts a Gluster volume, similar to NFS */ - storagePoolBuilder.append("\n"); - } else { - 
storagePoolBuilder.append("\n"); + String poolTypeXML; + switch (_poolType) { + case NETFS: + if (_nfsMountOpts != null) { + poolTypeXML = "netfs' xmlns:fs='http://libvirt.org/schemas/storagepool/fs/1.0"; + } else { + poolTypeXML = _poolType.toString(); + } + break; + case GLUSTERFS: + /* libvirt mounts a Gluster volume, similar to NFS */ + poolTypeXML = "netfs"; + break; + default: + poolTypeXML = _poolType.toString(); } + storagePoolBuilder.append("\n"); + storagePoolBuilder.append("" + _poolName + "\n"); if (_uuid != null) storagePoolBuilder.append("" + _uuid + "\n"); - if (_poolType == PoolType.NETFS) { - storagePoolBuilder.append("\n"); - storagePoolBuilder.append("\n"); - storagePoolBuilder.append("\n"); - storagePoolBuilder.append("\n"); - } - if (_poolType == PoolType.RBD) { - storagePoolBuilder.append("\n"); - for (String sourceHost : _sourceHost.split(",")) { + + switch (_poolType) { + case NETFS: + storagePoolBuilder.append("\n"); + storagePoolBuilder.append("\n"); + storagePoolBuilder.append("\n"); + storagePoolBuilder.append("\n"); + break; + + case RBD: + storagePoolBuilder.append("\n"); + for (String sourceHost : _sourceHost.split(",")) { + storagePoolBuilder.append("\n"); + } + + storagePoolBuilder.append("" + _sourceDir + "\n"); + if (_authUsername != null) { + storagePoolBuilder.append("\n"); + storagePoolBuilder.append("\n"); + storagePoolBuilder.append("\n"); + } + storagePoolBuilder.append("\n"); + break; + + case GLUSTERFS: + storagePoolBuilder.append("\n"); storagePoolBuilder.append("\n"); - } + storagePoolBuilder.append("\n"); + storagePoolBuilder.append("\n"); + storagePoolBuilder.append("\n"); + break; + } - storagePoolBuilder.append("" + _sourceDir + "\n"); - if (_authUsername != null) { - storagePoolBuilder.append("\n"); - storagePoolBuilder.append("\n"); - storagePoolBuilder.append("\n"); - } - storagePoolBuilder.append("\n"); - } - if (_poolType == PoolType.GLUSTERFS) { - storagePoolBuilder.append("\n"); - 
storagePoolBuilder.append("\n"); - storagePoolBuilder.append("\n"); - storagePoolBuilder.append("\n"); - storagePoolBuilder.append("\n"); - } if (_poolType != PoolType.RBD && _poolType != PoolType.POWERFLEX) { storagePoolBuilder.append("\n"); storagePoolBuilder.append("" + _targetPath + "\n"); storagePoolBuilder.append("\n"); } + if (_poolType == PoolType.NETFS && _nfsMountOpts != null) { + storagePoolBuilder.append("\n"); + for (String options : _nfsMountOpts) { + storagePoolBuilder.append("\n"); + } + storagePoolBuilder.append("\n"); + } storagePoolBuilder.append("\n"); return storagePoolBuilder.toString(); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java index 30616e04798..430e4ef851f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java @@ -38,6 +38,19 @@ import org.xml.sax.SAXException; public class LibvirtStoragePoolXMLParser { protected Logger logger = LogManager.getLogger(getClass()); + private List getNFSMountOptsFromRootElement(Element rootElement) { + List nfsMountOpts = new ArrayList<>(); + Element mountOpts = (Element) rootElement.getElementsByTagName("fs:mount_opts").item(0); + if (mountOpts != null) { + NodeList options = mountOpts.getElementsByTagName("fs:option"); + for (int i = 0; i < options.getLength(); i++) { + Element option = (Element) options.item(i); + nfsMountOpts.add(option.getAttribute("name")); + } + } + return nfsMountOpts; + } + public LibvirtStoragePoolDef parseStoragePoolXML(String poolXML) { DocumentBuilder builder; try { @@ -95,11 +108,15 @@ public class LibvirtStoragePoolXMLParser { poolName, uuid, host, port, path, targetPath); } else { String path = getAttrValue("dir", "path", 
source); - Element target = (Element)rootElement.getElementsByTagName("target").item(0); String targetPath = getTagValue("path", target); - return new LibvirtStoragePoolDef(LibvirtStoragePoolDef.PoolType.valueOf(type.toUpperCase()), poolName, uuid, host, path, targetPath); + if (type.equalsIgnoreCase("netfs")) { + List nfsMountOpts = getNFSMountOptsFromRootElement(rootElement); + return new LibvirtStoragePoolDef(LibvirtStoragePoolDef.PoolType.valueOf(type.toUpperCase()), poolName, uuid, host, path, targetPath, nfsMountOpts); + } else { + return new LibvirtStoragePoolDef(LibvirtStoragePoolDef.PoolType.valueOf(type.toUpperCase()), poolName, uuid, host, path, targetPath); + } } } catch (ParserConfigurationException e) { logger.debug(e.toString()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index a0394430643..0f11c12f101 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -95,6 +95,8 @@ public class LibvirtVMDef { } private GuestType _type; + private String manufacturer; + private String product; private BootType _boottype; private BootMode _bootmode; private String _arch; @@ -124,6 +126,28 @@ public class LibvirtVMDef { return _type; } + public String getManufacturer() { + if (StringUtils.isEmpty(manufacturer)) { + return "Apache Software Foundation"; + } + return manufacturer; + } + + public void setManufacturer(String manufacturer) { + this.manufacturer = manufacturer; + } + + public String getProduct() { + if (StringUtils.isEmpty(product)) { + return "CloudStack KVM Hypervisor"; + } + return product; + } + + public void setProduct(String product) { + this.product = product; + } + public void setNvram(String nvram) { _nvram = nvram; } public void setNvramTemplate(String 
nvramTemplate) { _nvramTemplate = nvramTemplate; } @@ -182,9 +206,10 @@ public class LibvirtVMDef { guestDef.append("\n"); guestDef.append("\n"); - guestDef.append("Apache Software Foundation\n"); - guestDef.append("CloudStack " + _type.toString() + " Hypervisor\n"); + guestDef.append("" + getManufacturer() +"\n"); + guestDef.append("" + getProduct() + "\n"); guestDef.append("" + _uuid + "\n"); + guestDef.append("" + _uuid + "\n"); guestDef.append("\n"); guestDef.append("\n"); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckConvertInstanceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckConvertInstanceCommandWrapper.java new file mode 100644 index 00000000000..d3ebb28b106 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckConvertInstanceCommandWrapper.java @@ -0,0 +1,53 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckConvertInstanceAnswer; +import com.cloud.agent.api.CheckConvertInstanceCommand; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; + +@ResourceWrapper(handles = CheckConvertInstanceCommand.class) +public class LibvirtCheckConvertInstanceCommandWrapper extends CommandWrapper { + + @Override + public Answer execute(CheckConvertInstanceCommand cmd, LibvirtComputingResource serverResource) { + if (!serverResource.hostSupportsInstanceConversion()) { + String msg = String.format("Cannot convert the instance from VMware as the virt-v2v binary is not found on host %s. " + + "Please install virt-v2v%s on the host before attempting the instance conversion.", serverResource.getPrivateIp(), serverResource.isUbuntuHost()? ", nbdkit" : ""); + logger.info(msg); + return new CheckConvertInstanceAnswer(cmd, false, msg); + } + + if (cmd.getCheckWindowsGuestConversionSupport() && !serverResource.hostSupportsWindowsGuestConversion()) { + String msg = String.format("Cannot convert the instance from VMware as the virtio-win package is not found on host %s. 
" + + "Please install virtio-win package on the host before attempting the windows guest instance conversion.", serverResource.getPrivateIp()); + logger.info(msg); + return new CheckConvertInstanceAnswer(cmd, false, msg); + } + + if (serverResource.hostSupportsOvfExport()) { + return new CheckConvertInstanceAnswer(cmd, true, true, ""); + } + + return new CheckConvertInstanceAnswer(cmd, true, ""); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapper.java index bd6634c83a4..cc955e86d8a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapper.java @@ -18,6 +18,24 @@ // package com.cloud.hypervisor.kvm.resource.wrapper; +import java.io.BufferedInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URLEncoder; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.vm.UnmanagedInstanceTO; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.ConvertInstanceAnswer; import com.cloud.agent.api.ConvertInstanceCommand; @@ -34,27 +52,11 @@ import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Storage; +import com.cloud.utils.FileUtil; import 
com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.vm.UnmanagedInstanceTO; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; - -import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.URLEncoder; -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; @ResourceWrapper(handles = ConvertInstanceCommand.class) public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper { @@ -62,8 +64,6 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper supportedInstanceConvertSourceHypervisors = List.of(Hypervisor.HypervisorType.VMware); - protected static final String checkIfConversionIsSupportedCommand = "which virt-v2v"; - @Override public Answer execute(ConvertInstanceCommand cmd, LibvirtComputingResource serverResource) { RemoteInstanceTO sourceInstance = cmd.getSourceInstance(); @@ -74,9 +74,9 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper 1 && !serverResource.ovfExportToolSupportsParallelThreads()) { + noOfThreads = 0; + } + ovfTemplateDirOnConversionLocation = UUID.randomUUID().toString(); + temporaryStoragePool.createFolder(ovfTemplateDirOnConversionLocation); + sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation); + ovfExported = exportOVAFromVMOnVcenter(exportInstanceOVAUrl, sourceOVFDirPath, noOfThreads, timeout); + if (!ovfExported) { + String err = String.format("Export OVA for the VM %s failed", sourceInstanceName); + logger.error(err); + return new 
ConvertInstanceAnswer(cmd, false, err); + } + sourceOVFDirPath = String.format("%s%s/", sourceOVFDirPath, sourceInstanceName); + } else { + ovfTemplateDirOnConversionLocation = cmd.getTemplateDirOnConversionLocation(); + sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation); + } + + logger.info(String.format("Attempting to convert the OVF %s of the instance %s from %s to KVM", ovfTemplateDirOnConversionLocation, sourceInstanceName, sourceHypervisorType)); + final String temporaryConvertUuid = UUID.randomUUID().toString(); boolean verboseModeEnabled = serverResource.isConvertInstanceVerboseModeEnabled(); try { - boolean result = performInstanceConversion(convertInstanceUrl, sourceInstanceName, temporaryPasswordFilePath, - temporaryConvertPath, temporaryConvertUuid, timeout, verboseModeEnabled); + boolean result = performInstanceConversion(sourceOVFDirPath, temporaryConvertPath, temporaryConvertUuid, + timeout, verboseModeEnabled); if (!result) { - String err = String.format("The virt-v2v conversion of the instance %s failed. " + - "Please check the agent logs for the virt-v2v output", sourceInstanceName); + String err = String.format("The virt-v2v conversion for the OVF %s failed. 
" + + "Please check the agent logs for the virt-v2v output", ovfTemplateDirOnConversionLocation); logger.error(err); return new ConvertInstanceAnswer(cmd, false, err); } @@ -130,8 +160,11 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper getTemporaryDisksFromParsedXml(KVMStoragePool pool, LibvirtDomainXMLParser xmlParser, String convertedBasePath) { List disksDefs = xmlParser.getDisks(); disksDefs = disksDefs.stream().filter(x -> x.getDiskType() == LibvirtVMDef.DiskDef.DiskType.FILE && @@ -201,12 +255,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper disks) { @@ -234,6 +283,11 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper getNfsStoragePoolHostAndPath(KVMStoragePool storagePool) { String sourceHostIp = null; String sourcePath = null; - String storagePoolMountPoint = Script.runSimpleBashScript(String.format("mount | grep %s", storagePool.getLocalPath())); + List commands = new ArrayList<>(); + commands.add(new String[]{Script.getExecutableAbsolutePath("mount")}); + commands.add(new String[]{Script.getExecutableAbsolutePath("grep"), storagePool.getLocalPath()}); + String storagePoolMountPoint = Script.executePipedCommands(commands, 0).second(); + logger.debug(String.format("NFS Storage pool: %s - local path: %s, mount point: %s", storagePool.getUuid(), storagePool.getLocalPath(), storagePoolMountPoint)); if (StringUtils.isNotEmpty(storagePoolMountPoint)) { String[] res = storagePoolMountPoint.strip().split(" "); res = res[0].split(":"); - sourceHostIp = res[0].strip(); - sourcePath = res[1].strip(); + if (res.length > 1) { + sourceHostIp = res[0].strip(); + sourcePath = res[1].strip(); + } } return new Pair<>(sourceHostIp, sourcePath); } - protected boolean performInstanceConversion(String convertInstanceUrl, String sourceInstanceName, - String temporaryPasswordFilePath, - String temporaryConvertFolder, - String temporaryConvertUuid, - long timeout, boolean verboseModeEnabled) { + 
private boolean exportOVAFromVMOnVcenter(String vmExportUrl, + String targetOvfDir, + int noOfThreads, + long timeout) { + Script script = new Script("ovftool", timeout, logger); + script.add("--noSSLVerify"); + if (noOfThreads > 1) { + script.add(String.format("--parallelThreads=%s", noOfThreads)); + } + script.add(vmExportUrl); + script.add(targetOvfDir); + + String logPrefix = "export ovf"; + OutputInterpreter.LineByLineOutputLogger outputLogger = new OutputInterpreter.LineByLineOutputLogger(logger, logPrefix); + script.execute(outputLogger); + int exitValue = script.getExitValue(); + return exitValue == 0; + } + + protected boolean performInstanceConversion(String sourceOVFDirPath, + String temporaryConvertFolder, + String temporaryConvertUuid, + long timeout, boolean verboseModeEnabled) { Script script = new Script("virt-v2v", timeout, logger); script.add("--root", "first"); - script.add("-ic", convertInstanceUrl); - script.add(sourceInstanceName); - script.add("--password-file", temporaryPasswordFilePath); + script.add("-i", "ova"); + script.add(sourceOVFDirPath); script.add("-o", "local"); script.add("-os", temporaryConvertFolder); script.add("-of", "qcow2"); @@ -332,44 +409,13 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper %s", password, passwordFile)); - return passwordFile; - } - - private String getConvertInstanceUrl(RemoteInstanceTO sourceInstance) { - String url = null; - if (sourceInstance.getHypervisorType() == Hypervisor.HypervisorType.VMware) { - url = getConvertInstanceUrlFromVmware(sourceInstance); - } - return url; - } - - private String getConvertInstanceUrlFromVmware(RemoteInstanceTO vmwareInstance) { - String vcenter = vmwareInstance.getVcenterHost(); - String datacenter = vmwareInstance.getDatacenterName(); - String username = vmwareInstance.getVcenterUsername(); - String host = vmwareInstance.getHostName(); - String cluster = vmwareInstance.getClusterName(); - - String encodedUsername = encodeUsername(username); 
- return String.format("vpx://%s@%s/%s/%s/%s?no_verify=1", - encodedUsername, vcenter, datacenter, cluster, host); - } protected LibvirtDomainXMLParser parseMigratedVMXmlDomain(String installPath) throws IOException { String xmlPath = String.format("%s.xml", installPath); if (!new File(xmlPath).exists()) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java index 025a5ed192c..e6ec05fec23 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java @@ -43,7 +43,6 @@ public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper< @Override public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { - String result = null; String srcIp = command.getRemoteIp(); String username = command.getUsername(); String password = command.getPassword(); @@ -53,23 +52,25 @@ public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper< KVMStoragePoolManager poolMgr = libvirtComputingResource.getStoragePoolMgr(); KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid()); String dstPath = pool.getLocalPath(); + int timeoutInSecs = command.getWait(); try { if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem || storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) { - String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath); - logger.debug("Volume Copy Successful"); + String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, 
tmpPath, timeoutInSecs); + logger.debug("Volume " + srcFile + " copy successful, copied to file: " + filename); final KVMPhysicalDisk vol = pool.getPhysicalDisk(filename); final String path = vol.getPath(); long size = getVirtualSizeFromFile(path); - return new CopyRemoteVolumeAnswer(command, "", filename, size); + return new CopyRemoteVolumeAnswer(command, "", filename, size); } else { - return new Answer(command, false, "Unsupported Storage Pool"); + String msg = "Unsupported storage pool type: " + storageFilerTO.getType().toString() + ", only local and NFS pools are supported"; + return new Answer(command, false, msg); } - } catch (final Exception e) { - logger.error("Error while copying file from remote host: "+ e.getMessage()); - return new Answer(command, false, result); + logger.error("Error while copying volume file from remote host: " + e.getMessage(), e); + String msg = "Failed to copy volume due to: " + e.getMessage(); + return new Answer(command, false, msg); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteBackupCommandWrapper.java new file mode 100644 index 00000000000..4772d3b472c --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteBackupCommandWrapper.java @@ -0,0 +1,63 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import com.cloud.utils.script.Script; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.DeleteBackupCommand; + +import java.util.ArrayList; +import java.util.List; + +@ResourceWrapper(handles = DeleteBackupCommand.class) +public class LibvirtDeleteBackupCommandWrapper extends CommandWrapper { + @Override + public Answer execute(DeleteBackupCommand command, LibvirtComputingResource libvirtComputingResource) { + final String backupPath = command.getBackupPath(); + final String backupRepoType = command.getBackupRepoType(); + final String backupRepoAddress = command.getBackupRepoAddress(); + final String mountOptions = command.getMountOptions(); + + List commands = new ArrayList<>(); + commands.add(new String[]{ + libvirtComputingResource.getNasBackupPath(), + "-o", "delete", + "-t", backupRepoType, + "-s", backupRepoAddress, + "-m", mountOptions, + "-p", backupPath + }); + + Pair result = Script.executePipedCommands(commands, libvirtComputingResource.getCmdsTimeout()); + + logger.debug(String.format("Backup delete result: %s , exit code: %s", result.second(), result.first())); + + if (result.first() != 0) { + logger.debug(String.format("Failed to delete VM backup: %s", result.second())); + return new BackupAnswer(command, false, 
result.second()); + } + return new BackupAnswer(command, true, null); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVMSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVMSnapshotCommandWrapper.java index 45b0c179938..58a74d6e0f6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVMSnapshotCommandWrapper.java @@ -19,6 +19,9 @@ package com.cloud.hypervisor.kvm.resource.wrapper; +import java.util.ArrayList; +import java.util.List; + import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.libvirt.Connect; @@ -35,8 +38,8 @@ import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; -import com.cloud.storage.Volume; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Volume; import com.cloud.utils.script.Script; @ResourceWrapper(handles = DeleteVMSnapshotCommand.class) @@ -94,12 +97,20 @@ public final class LibvirtDeleteVMSnapshotCommandWrapper extends CommandWrapper< PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) rootVolume.getDataStore(); KVMPhysicalDisk rootDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), rootVolume.getPath()); - String qemu_img_snapshot = Script.runSimpleBashScript("qemu-img snapshot -l " + rootDisk.getPath() + " | tail -n +3 | awk -F ' ' '{print $2}' | grep ^" + cmd.getTarget().getSnapshotName() + "$"); + String qemuImgPath = Script.getExecutableAbsolutePath("qemu-img"); + List commands = new ArrayList<>(); + commands.add(new String[]{qemuImgPath, 
"snapshot", "-l", sanitizeBashCommandArgument(rootDisk.getPath())}); + commands.add(new String[]{Script.getExecutableAbsolutePath("tail"), "-n", "+3"}); + commands.add(new String[]{Script.getExecutableAbsolutePath("awk"), "-F", " ", "{print $2}"}); + commands.add(new String[]{Script.getExecutableAbsolutePath("grep"), "^" + sanitizeBashCommandArgument(cmd.getTarget().getSnapshotName()) + "$"}); + String qemu_img_snapshot = Script.executePipedCommands(commands, 0).second(); if (qemu_img_snapshot == null) { logger.info("Cannot find snapshot " + cmd.getTarget().getSnapshotName() + " in file " + rootDisk.getPath() + ", return true"); return new DeleteVMSnapshotAnswer(cmd, cmd.getVolumeTOs()); } - int result = Script.runSimpleBashScriptForExitValue("qemu-img snapshot -d " + cmd.getTarget().getSnapshotName() + " " + rootDisk.getPath()); + int result = Script.executeCommandForExitValue(qemuImgPath, "snapshot", "-d", + sanitizeBashCommandArgument(cmd.getTarget().getSnapshotName()), + sanitizeBashCommandArgument(rootDisk.getPath())); if (result != 0) { return new DeleteVMSnapshotAnswer(cmd, false, "Delete VM Snapshot Failed due to can not remove snapshot from image file " + rootDisk.getPath() + " : " + result); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java index ead294ad05f..114b27d3a5b 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java @@ -47,37 +47,38 @@ public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper unmanagedInstances = new HashMap<>(); try { Connect conn = LibvirtConnection.getConnection(hypervisorURI); final List allVmNames = 
libvirtComputingResource.getAllVmNames(conn); + logger.info(String.format("Found %d VMs on the remote host %s", allVmNames.size(), remoteIp)); for (String name : allVmNames) { final Domain domain = libvirtComputingResource.getDomain(conn, name); - final DomainInfo.DomainState ps = domain.getInfo().state; - final VirtualMachine.PowerState state = libvirtComputingResource.convertToPowerState(ps); - logger.debug("VM " + domain.getName() + ": powerstate = " + ps + "; vm state=" + state.toString()); + logger.debug(String.format("Remote VM %s - powerstate: %s, state: %s", domain.getName(), ps.toString(), state.toString())); if (state == VirtualMachine.PowerState.PowerOff) { try { UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn); unmanagedInstances.put(instance.getName(), instance); } catch (Exception e) { - logger.error("Error while fetching instance details", e); + logger.error("Couldn't fetch remote VM " + domain.getName() + " details, due to: " + e.getMessage(), e); } } domain.free(); } - logger.debug("Found Vms: "+ unmanagedInstances.size()); - return new GetRemoteVmsAnswer(command, "", unmanagedInstances); + logger.debug("Found " + unmanagedInstances.size() + " stopped VMs on remote host " + remoteIp); + return new GetRemoteVmsAnswer(command, "", unmanagedInstances); } catch (final LibvirtException e) { - logger.error("Error while listing stopped Vms on remote host: "+ e.getMessage()); - return new Answer(command, false, result); + logger.error("Failed to list stopped VMs on remote host " + remoteIp + ", due to: " + e.getMessage(), e); + if (e.getMessage().toLowerCase().contains("connection refused")) { + return new Answer(command, false, "Unable to connect to remote host " + remoteIp + ", please check the libvirtd tcp connectivity and retry"); + } + return new Answer(command, false, "Unable to list stopped VMs on remote host " + remoteIp + ", due to: " + e.getMessage()); } } @@ -103,8 +104,8 @@ public final class 
LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper sourceHostPath = getSourceHostPath(libvirtComputingResource, diskDef.getSourcePath()); if (sourceHostPath != null) { disk.setDatastoreHost(sourceHostPath.first()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmIpAddressCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmIpAddressCommandWrapper.java index 227e68872da..61c20f96bac 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmIpAddressCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmIpAddressCommandWrapper.java @@ -19,6 +19,9 @@ package com.cloud.hypervisor.kvm.resource.wrapper; +import java.util.ArrayList; +import java.util.List; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.GetVmIpAddressCommand; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; @@ -35,31 +38,51 @@ public final class LibvirtGetVmIpAddressCommandWrapper extends CommandWrapper commands = new ArrayList<>(); + final String virt_ls_path = Script.getExecutableAbsolutePath("virt-ls"); + final String virt_cat_path = Script.getExecutableAbsolutePath("virt-cat"); + final String virt_win_reg_path = Script.getExecutableAbsolutePath("virt-win-reg"); + final String tail_path = Script.getExecutableAbsolutePath("tail"); + final String grep_path = Script.getExecutableAbsolutePath("grep"); + final String awk_path = Script.getExecutableAbsolutePath("awk"); + final String sed_path = Script.getExecutableAbsolutePath("sed"); if(!command.isWindows()) { //List all dhcp lease files inside guestVm - String leasesList = Script.runSimpleBashScript(new StringBuilder().append("virt-ls ").append(command.getVmName()) - .append(" /var/lib/dhclient/ | grep .*\\*.leases").toString()); + commands.add(new String[]{virt_ls_path, sanitizedVmName, 
"/var/lib/dhclient/"}); + commands.add(new String[]{grep_path, ".*\\*.leases"}); + String leasesList = Script.executePipedCommands(commands, 0).second(); if(leasesList != null) { String[] leasesFiles = leasesList.split("\n"); for(String leaseFile : leasesFiles){ - //Read from each dhclient lease file inside guest Vm using virt-cat libguestfs ulitiy - String ipAddr = Script.runSimpleBashScript(new StringBuilder().append("virt-cat ").append(command.getVmName()) - .append(" /var/lib/dhclient/" + leaseFile + " | tail -16 | grep 'fixed-address' | awk '{print $2}' | sed -e 's/;//'").toString()); + //Read from each dhclient lease file inside guest Vm using virt-cat libguestfs utility + commands = new ArrayList<>(); + commands.add(new String[]{virt_cat_path, sanitizedVmName, "/var/lib/dhclient/" + leaseFile}); + commands.add(new String[]{tail_path, "-16"}); + commands.add(new String[]{grep_path, "fixed-address"}); + commands.add(new String[]{awk_path, "{print $2}"}); + commands.add(new String[]{sed_path, "-e", "s/;//"}); + String ipAddr = Script.executePipedCommands(commands, 0).second(); // Check if the IP belongs to the network - if((ipAddr != null) && NetUtils.isIpWithInCidrRange(ipAddr, networkCidr)){ + if((ipAddr != null) && NetUtils.isIpWithInCidrRange(ipAddr, networkCidr)) { ip = ipAddr; break; } - logger.debug("GetVmIp: "+command.getVmName()+ " Ip: "+ipAddr+" does not belong to network "+networkCidr); + logger.debug("GetVmIp: "+ vmName + " Ip: "+ipAddr+" does not belong to network "+networkCidr); } } } else { // For windows, read from guest Vm registry using virt-win-reg libguestfs ulitiy. 
Registry Path: HKEY_LOCAL_MACHINE\SYSTEM\ControlSet001\Services\Tcpip\Parameters\Interfaces\\DhcpIPAddress - String ipList = Script.runSimpleBashScript(new StringBuilder().append("virt-win-reg --unsafe-printable-strings ").append(command.getVmName()) - .append(" 'HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001\\Services\\Tcpip\\Parameters\\Interfaces' | grep DhcpIPAddress | awk -F : '{print $2}' | sed -e 's/^\"//' -e 's/\"$//'").toString()); + commands = new ArrayList<>(); + commands.add(new String[]{virt_win_reg_path, "--unsafe-printable-strings", sanitizedVmName, "HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001\\Services\\Tcpip\\Parameters\\Interfaces"}); + commands.add(new String[]{grep_path, "DhcpIPAddress"}); + commands.add(new String[]{awk_path, "-F", ":", "{print $2}"}); + commands.add(new String[]{sed_path, "-e", "s/^\"//", "-e", "s/\"$//"}); + String ipList = Script.executePipedCommands(commands, 0).second(); if(ipList != null) { - logger.debug("GetVmIp: "+command.getVmName()+ "Ips: "+ipList); + logger.debug("GetVmIp: "+ vmName + "Ips: "+ipList); String[] ips = ipList.split("\n"); for (String ipAddr : ips){ // Check if the IP belongs to the network @@ -67,13 +90,13 @@ public final class LibvirtGetVmIpAddressCommandWrapper extends CommandWrapper { + + @Override + public Answer execute(final GetVolumeStatCommand cmd, final LibvirtComputingResource libvirtComputingResource) { + try { + String volumePath = cmd.getVolumePath(); + StoragePoolType poolType = cmd.getPoolType(); + String poolUuid = cmd.getPoolUuid(); + + KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(poolType, poolUuid); + if (primaryPool == null) { + String msg = "Can't get volume stats as pool details unavailable for volume: " + volumePath + " on the storage pool: " + poolUuid; + return new GetVolumeStatAnswer(cmd, false, msg); + } + + KVMPhysicalDisk disk = primaryPool.getPhysicalDisk(volumePath); + if (disk 
== null) { + String msg = "Can't get volume stats as disk details unavailable for volume: " + volumePath + " on the storage pool: " + poolUuid; + return new GetVolumeStatAnswer(cmd, false, msg); + } + + return new GetVolumeStatAnswer(cmd, disk.getSize(), disk.getVirtualSize()); + } catch (CloudRuntimeException e) { + logger.error("Can't get volume stats, due to: " + e.getMessage(), e); + return new GetVolumeStatAnswer(cmd, false, "Can't get volume stats, due to: " + e.getMessage()); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java index b97cb666de0..e15a3287692 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java @@ -67,6 +67,7 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.MigrateAnswer; import com.cloud.agent.api.MigrateCommand; import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo; +import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.DpdkTO; import com.cloud.agent.api.to.VirtualMachineTO; @@ -90,6 +91,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper disks, String vmName) { + String oldIsoVolumePath = null; + for (DiskDef disk : disks) { + if (DiskDef.DeviceType.CDROM.equals(disk.getDeviceType()) + && CDROM_LABEL.equals(disk.getDiskLabel()) + && disk.getDiskPath() != null) { + oldIsoVolumePath = disk.getDiskPath(); + break; + } + } + return oldIsoVolumePath; + } + + private String getNewVolumePathForCdrom(LibvirtComputingResource libvirtComputingResource, Connect conn, VirtualMachineTO to) throws LibvirtException, URISyntaxException { + DiskTO newDisk = null; + for 
(DiskTO disk : to.getDisks()) { + DataTO data = disk.getData(); + if (disk.getDiskSeq() == 3 && data != null && data.getPath() != null) { + newDisk = disk; + break; + } + } + + String newIsoVolumePath = null; + if (newDisk != null) { + newIsoVolumePath = libvirtComputingResource.getVolumePath(conn, newDisk); + } + return newIsoVolumePath; + } + + protected String replaceCdromIsoPath(String xmlDesc, String vmName, String oldIsoVolumePath, String newIsoVolumePath) throws IOException, ParserConfigurationException, TransformerException, SAXException { + InputStream in = IOUtils.toInputStream(xmlDesc); + + DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory(); + DocumentBuilder docBuilder = docFactory.newDocumentBuilder(); + Document doc = docBuilder.parse(in); + + // Get the root element + Node domainNode = doc.getFirstChild(); + + NodeList domainChildNodes = domainNode.getChildNodes(); + + for (int i = 0; i < domainChildNodes.getLength(); i++) { + Node domainChildNode = domainChildNodes.item(i); + if ("devices".equals(domainChildNode.getNodeName())) { + NodeList devicesChildNodes = domainChildNode.getChildNodes(); + for (int x = 0; x < devicesChildNodes.getLength(); x++) { + Node deviceChildNode = devicesChildNodes.item(x); + if ("disk".equals(deviceChildNode.getNodeName())) { + Node diskNode = deviceChildNode; + NodeList diskChildNodes = diskNode.getChildNodes(); + for (int z = 0; z < diskChildNodes.getLength(); z++) { + Node diskChildNode = diskChildNodes.item(z); + if ("source".equals(diskChildNode.getNodeName())) { + NamedNodeMap sourceNodeAttributes = diskChildNode.getAttributes(); + Node sourceNodeAttribute = sourceNodeAttributes.getNamedItem("file"); + if (oldIsoVolumePath != null && sourceNodeAttribute != null + && oldIsoVolumePath.equals(sourceNodeAttribute.getNodeValue())) { + diskNode.removeChild(diskChildNode); + Element newChildSourceNode = doc.createElement("source"); + newChildSourceNode.setAttribute("file", newIsoVolumePath); 
+ diskNode.appendChild(newChildSourceNode); + logger.debug(String.format("Replaced ISO path [%s] with [%s] in VM [%s] XML configuration.", oldIsoVolumePath, newIsoVolumePath, vmName)); + return getXml(doc); + } + } + } + } + } + } + } + + return getXml(doc); + } + private String getPathFromSourceText(Set paths, String sourceText) { if (paths != null && StringUtils.isNotBlank(sourceText)) { for (String path : paths) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java index 00f627d0528..0221496b79c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java @@ -140,7 +140,6 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper 0) { DomainBlockJobInfo blockJobInfo = dm.getBlockJobInfo(diskLabel, 0); if (blockJobInfo != null) { - logger.debug(String.format("Volume %s : %s block copy progress: %s%% current value:%s end value:%s", diskLabel, srcPath, (blockJobInfo.end == 0)? 0 : 100*(blockJobInfo.cur / (double) blockJobInfo.end), blockJobInfo.cur, blockJobInfo.end)); + blockCopyProgress = (blockJobInfo.end == 0)? 
blockCopyProgress : 100 * (blockJobInfo.cur / (double) blockJobInfo.end); + logger.debug(String.format("Volume %s : %s, block copy progress: %s%%, current value: %s end value: %s, job info - type: %s, bandwidth: %s", + diskLabel, srcPath, blockCopyProgress, blockJobInfo.cur, blockJobInfo.end, blockJobInfo.type, blockJobInfo.bandwidth)); if (blockJobInfo.cur == blockJobInfo.end) { - logger.info(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath)); - dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.PIVOT); - if (StringUtils.isNotEmpty(srcSecretUUID)) { - libvirtComputingResource.removeLibvirtVolumeSecret(conn, srcSecretUUID); + if (blockJobInfo.end > 0) { + logger.info(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath)); + dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.PIVOT); + if (StringUtils.isNotEmpty(srcSecretUUID)) { + libvirtComputingResource.removeLibvirtVolumeSecret(conn, srcSecretUUID); + } + break; + } else { + // cur = 0, end = 0 - at this point, disk does not have an active block job (so, no need to abort job) + String msg = String.format("No active block copy job for the volume %s : %s - job stopped at %s progress", diskLabel, srcPath, blockCopyProgress); + logger.warn(msg); + return new MigrateVolumeAnswer(command, false, msg, null); } - break; } } else { logger.info("Failed to get the block copy status, trying to abort the job"); @@ -291,15 +300,27 @@ public class LibvirtMigrateVolumeCommandWrapper extends CommandWrapper { + private String getSubnetMaskForAddress(NetworkInterface networkInterface, InetAddress inetAddress) { + for (InterfaceAddress address : networkInterface.getInterfaceAddresses()) { + if (!inetAddress.equals(address.getAddress())) { + continue; + } + int prefixLength = address.getNetworkPrefixLength(); + int mask = 0xffffffff << (32 - prefixLength); + return String.format("%d.%d.%d.%d", + (mask >>> 24) & 0xff, + (mask >>> 16) & 0xff, + (mask >>> 8) & 0xff, + mask 
& 0xff); + } + return ""; + } + + private String getMacAddress(NetworkInterface networkInterface) throws SocketException { + byte[] macBytes = networkInterface.getHardwareAddress(); + if (macBytes == null) { + return ""; + } + StringBuilder macAddress = new StringBuilder(); + for (byte b : macBytes) { + macAddress.append(String.format("%02X:", b)); + } + if (macAddress.length() > 0) { + macAddress.deleteCharAt(macAddress.length() - 1); // Remove trailing colon + } + return macAddress.toString(); + } + + public Ternary getInterfaceDetails(String interfaceName) throws SocketException { + NetworkInterface networkInterface = NetworkInterface.getByName(interfaceName); + if (networkInterface == null) { + logger.warn(String.format("Network interface: '%s' not found", interfaceName)); + return new Ternary<>(null, null, null); + } + Enumeration inetAddresses = networkInterface.getInetAddresses(); + while (inetAddresses.hasMoreElements()) { + InetAddress inetAddress = inetAddresses.nextElement(); + if (inetAddress instanceof java.net.Inet4Address) { + String ipAddress = inetAddress.getHostAddress(); + String subnetMask = getSubnetMaskForAddress(networkInterface, inetAddress); + String macAddress = getMacAddress(networkInterface); + return new Ternary<>(ipAddress, subnetMask, macAddress); + } + } + return new Ternary<>(null, null, null); + } + @Override public Answer execute(final OvsFetchInterfaceCommand command, final LibvirtComputingResource libvirtComputingResource) { - final String label = command.getLabel(); + final String label = "'" + command.getLabel() + "'"; logger.debug("Will look for network with name-label:" + label); try { - String ipadd = Script.runSimpleBashScript("ifconfig " + label + " | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"); - if (StringUtils.isEmpty(ipadd)) { - ipadd = Script.runSimpleBashScript("ifconfig " + label + " | grep ' inet ' | awk '{ print $2}'"); - } - String mask = Script.runSimpleBashScript("ifconfig " + label + " | grep 'inet 
addr:' | cut -d: -f4"); - if (StringUtils.isEmpty(mask)) { - mask = Script.runSimpleBashScript("ifconfig " + label + " | grep ' inet ' | awk '{ print $4}'"); - } - String mac = Script.runSimpleBashScript("ifconfig " + label + " | grep HWaddr | awk -F \" \" '{print $5}'"); - if (StringUtils.isEmpty(mac)) { - mac = Script.runSimpleBashScript("ifconfig " + label + " | grep ' ether ' | awk '{ print $2}'"); - } + Ternary interfaceDetails = getInterfaceDetails(label); return new OvsFetchInterfaceAnswer(command, true, "Interface " + label - + " retrieved successfully", ipadd, mask, mac); + + " retrieved successfully", interfaceDetails.first(), interfaceDetails.second(), + interfaceDetails.third()); } catch (final Exception e) { logger.warn("Caught execption when fetching interface", e); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java index c8b20511346..53549487704 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java @@ -46,7 +46,6 @@ import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Volume; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; @ResourceWrapper(handles = PrepareForMigrationCommand.class) public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapper { @@ -127,9 +126,7 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp } catch (final LibvirtException | CloudRuntimeException | InternalErrorException | URISyntaxException e) { if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { for 
(DpdkTO to : dpdkInterfaceMapping.values()) { - String cmd = String.format("ovs-vsctl del-port %s", to.getPort()); - logger.debug("Removing DPDK port: " + to.getPort()); - Script.runSimpleBashScript(cmd); + removeDpdkPort(to.getPort()); } } return new PrepareForMigrationAnswer(command, e.toString()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareStorageClientCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareStorageClientCommandWrapper.java new file mode 100644 index 00000000000..b5cab17ecb1 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareStorageClientCommandWrapper.java @@ -0,0 +1,48 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import java.util.Map; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.PrepareStorageClientAnswer; +import com.cloud.agent.api.PrepareStorageClientCommand; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Ternary; + +@ResourceWrapper(handles = PrepareStorageClientCommand.class) +public class LibvirtPrepareStorageClientCommandWrapper extends CommandWrapper { + + @Override + public Answer execute(PrepareStorageClientCommand cmd, LibvirtComputingResource libvirtComputingResource) { + final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + Ternary, String> prepareStorageClientResult = storagePoolMgr.prepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid(), cmd.getDetails()); + if (!prepareStorageClientResult.first()) { + String msg = prepareStorageClientResult.third(); + logger.debug("Unable to prepare storage client, due to: " + msg); + return new PrepareStorageClientAnswer(cmd, false, msg); + } + Map details = prepareStorageClientResult.second(); + return new PrepareStorageClientAnswer(cmd, true, details); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReadyCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReadyCommandWrapper.java index 0b0f69f3eed..8f23e79e4a3 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReadyCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReadyCommandWrapper.java @@ -19,7 +19,9 @@ package com.cloud.hypervisor.kvm.resource.wrapper; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; 
import java.util.Map; import com.cloud.agent.api.Answer; @@ -33,7 +35,6 @@ import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.utils.script.Script; - @ResourceWrapper(handles = ReadyCommand.class) public final class LibvirtReadyCommandWrapper extends CommandWrapper { @@ -50,13 +51,18 @@ public final class LibvirtReadyCommandWrapper extends CommandWrapper commands = new ArrayList<>(); + commands.add(new String[]{Script.getExecutableAbsolutePath("rpm"), "-qa"}); + commands.add(new String[]{Script.getExecutableAbsolutePath("grep"), "-i", "ovmf"}); + result = Script.executePipedCommands(commands, timeout).first(); } - logger.debug("Running command : [" + cmd + "] with timeout : " + timeout + " ms"); - int result = Script.runSimpleBashScriptForExitValue(cmd, timeout, false); logger.debug("Got result : " + result); return result == 0; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreBackupCommandWrapper.java new file mode 100644 index 00000000000..23ead355096 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreBackupCommandWrapper.java @@ -0,0 +1,203 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.RestoreBackupCommand; +import org.apache.commons.lang3.RandomStringUtils; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +@ResourceWrapper(handles = RestoreBackupCommand.class) +public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper { + private static final String BACKUP_TEMP_FILE_PREFIX = "csbackup"; + private static final String MOUNT_COMMAND = "sudo mount -t %s %s %s"; + private static final String UMOUNT_COMMAND = "sudo umount %s"; + private static final String FILE_PATH_PLACEHOLDER = "%s/%s"; + private static final String ATTACH_DISK_COMMAND = " virsh attach-disk %s %s %s --cache none"; + private static final String CURRRENT_DEVICE = "virsh domblklist --domain %s | tail -n 3 | head -n 1 | awk '{print $1}'"; + private static final String RSYNC_COMMAND = "rsync -az %s %s"; + + @Override + public Answer execute(RestoreBackupCommand command, LibvirtComputingResource serverResource) { + String vmName 
= command.getVmName(); + String backupPath = command.getBackupPath(); + String backupRepoAddress = command.getBackupRepoAddress(); + String backupRepoType = command.getBackupRepoType(); + String mountOptions = command.getMountOptions(); + Boolean vmExists = command.isVmExists(); + String diskType = command.getDiskType(); + List volumePaths = command.getVolumePaths(); + String restoreVolumeUuid = command.getRestoreVolumeUUID(); + + String newVolumeId = null; + if (Objects.isNull(vmExists)) { + String volumePath = volumePaths.get(0); + int lastIndex = volumePath.lastIndexOf("/"); + newVolumeId = volumePath.substring(lastIndex + 1); + restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, diskType, restoreVolumeUuid, + new Pair<>(vmName, command.getVmState())); + } else if (Boolean.TRUE.equals(vmExists)) { + restoreVolumesOfExistingVM(volumePaths, backupPath, backupRepoType, backupRepoAddress, mountOptions); + } else { + restoreVolumesOfDestroyedVMs(volumePaths, vmName, backupPath, backupRepoType, backupRepoAddress, mountOptions); + } + + return new BackupAnswer(command, true, newVolumeId); + } + + private void restoreVolumesOfExistingVM(List volumePaths, String backupPath, + String backupRepoType, String backupRepoAddress, String mountOptions) { + String diskType = "root"; + String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType); + try { + for (int idx = 0; idx < volumePaths.size(); idx++) { + String volumePath = volumePaths.get(idx); + Pair bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, null); + diskType = "datadisk"; + try { + replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first()); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e); + } + } + } finally { + unmountBackupDirectory(mountDirectory); + deleteTemporaryDirectory(mountDirectory); + } + + } + + 
private void restoreVolumesOfDestroyedVMs(List volumePaths, String vmName, String backupPath, + String backupRepoType, String backupRepoAddress, String mountOptions) { + String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType); + String diskType = "root"; + try { + for (int i = 0; i < volumePaths.size(); i++) { + String volumePath = volumePaths.get(i); + Pair bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, null); + diskType = "datadisk"; + try { + replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first()); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e); + } + } + } finally { + unmountBackupDirectory(mountDirectory); + deleteTemporaryDirectory(mountDirectory); + } + } + + private void restoreVolume(String backupPath, String backupRepoType, String backupRepoAddress, String volumePath, + String diskType, String volumeUUID, Pair vmNameAndState) { + String mountDirectory = mountBackupDirectory(backupRepoAddress, backupRepoType); + Pair bkpPathAndVolUuid; + try { + bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, volumeUUID); + try { + replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first()); + if (VirtualMachine.State.Running.equals(vmNameAndState.second())) { + if (!attachVolumeToVm(vmNameAndState.first(), volumePath)) { + throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first())); + } + } + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e); + } + } catch (Exception e) { + throw new CloudRuntimeException("Failed to restore volume", e); + } finally { + unmountBackupDirectory(mountDirectory); + deleteTemporaryDirectory(mountDirectory); + } + } + + + private String 
mountBackupDirectory(String backupRepoAddress, String backupRepoType) { + String randomChars = RandomStringUtils.random(5, true, false); + String mountDirectory = String.format("%s.%s",BACKUP_TEMP_FILE_PREFIX , randomChars); + try { + mountDirectory = Files.createTempDirectory(mountDirectory).toString(); + String mount = String.format(MOUNT_COMMAND, backupRepoType, backupRepoAddress, mountDirectory); + Script.runSimpleBashScript(mount); + } catch (Exception e) { + throw new CloudRuntimeException(String.format("Failed to mount %s to %s", backupRepoType, backupRepoAddress), e); + } + return mountDirectory; + } + + private void unmountBackupDirectory(String backupDirectory) { + try { + String umountCmd = String.format(UMOUNT_COMMAND, backupDirectory); + Script.runSimpleBashScript(umountCmd); + } catch (Exception e) { + throw new CloudRuntimeException(String.format("Failed to unmount backup directory: %s", backupDirectory), e); + } + } + + private void deleteTemporaryDirectory(String backupDirectory) { + try { + Files.deleteIfExists(Paths.get(backupDirectory)); + } catch (IOException e) { + throw new CloudRuntimeException(String.format("Failed to delete backup directory: %s", backupDirectory), e); + } + } + + private Pair getBackupPath(String mountDirectory, String volumePath, String backupPath, String diskType, String volumeUuid) { + String bkpPath = String.format(FILE_PATH_PLACEHOLDER, mountDirectory, backupPath); + int lastIndex = volumePath.lastIndexOf(File.separator); + String volUuid = Objects.isNull(volumeUuid) ? 
volumePath.substring(lastIndex + 1) : volumeUuid; + String backupFileName = String.format("%s.%s.qcow2", diskType.toLowerCase(Locale.ROOT), volUuid); + bkpPath = String.format(FILE_PATH_PLACEHOLDER, bkpPath, backupFileName); + return new Pair<>(bkpPath, volUuid); + } + + private void replaceVolumeWithBackup(String volumePath, String backupPath) throws IOException { + Script.runSimpleBashScript(String.format(RSYNC_COMMAND, backupPath, volumePath)); + } + + private boolean attachVolumeToVm(String vmName, String volumePath) { + String deviceToAttachDiskTo = getDeviceToAttachDisk(vmName); + int exitValue = Script.runSimpleBashScriptForExitValue(String.format(ATTACH_DISK_COMMAND, vmName, volumePath, deviceToAttachDiskTo)); + return exitValue == 0; + } + + private String getDeviceToAttachDisk(String vmName) { + String currentDevice = Script.runSimpleBashScript(String.format(CURRRENT_DEVICE, vmName)); + char lastChar = currentDevice.charAt(currentDevice.length() - 1); + char incrementedChar = (char) (lastChar + 1); + return currentDevice.substring(0, currentDevice.length() - 1) + incrementedChar; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevokeDirectDownloadCertificateWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevokeDirectDownloadCertificateWrapper.java index 34815155711..9919689cf3b 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevokeDirectDownloadCertificateWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevokeDirectDownloadCertificateWrapper.java @@ -19,6 +19,14 @@ package com.cloud.hypervisor.kvm.resource.wrapper; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; + +import org.apache.cloudstack.agent.directdownload.RevokeDirectDownloadCertificateCommand; +import 
org.apache.cloudstack.utils.security.KeyStoreUtils; +import org.apache.commons.lang3.StringUtils; + import com.cloud.agent.api.Answer; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.resource.CommandWrapper; @@ -26,13 +34,6 @@ import com.cloud.resource.ResourceWrapper; import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; -import org.apache.cloudstack.agent.directdownload.RevokeDirectDownloadCertificateCommand; -import org.apache.cloudstack.utils.security.KeyStoreUtils; -import org.apache.commons.lang3.StringUtils; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; @ResourceWrapper(handles = RevokeDirectDownloadCertificateCommand.class) public class LibvirtRevokeDirectDownloadCertificateWrapper extends CommandWrapper { @@ -82,17 +83,17 @@ public class LibvirtRevokeDirectDownloadCertificateWrapper extends CommandWrappe } final String keyStoreFile = getKeyStoreFilePath(agentFile); - - String checkCmd = String.format("keytool -list -alias %s -keystore %s -storepass %s", - certificateAlias, keyStoreFile, privatePassword); - int existsCmdResult = Script.runSimpleBashScriptForExitValue(checkCmd); + String keyToolPath = Script.getExecutableAbsolutePath("keytool"); + int existsCmdResult = Script.executeCommandForExitValue(keyToolPath, "-list", "-alias", + sanitizeBashCommandArgument(certificateAlias), "-keystore", keyStoreFile, "-storepass", + privatePassword); if (existsCmdResult == 1) { logger.error("Certificate alias " + certificateAlias + " does not exist, no need to revoke it"); } else { - String revokeCmd = String.format("keytool -delete -alias %s -keystore %s -storepass %s", - certificateAlias, keyStoreFile, privatePassword); logger.debug("Revoking certificate alias " + certificateAlias + " from keystore " + keyStoreFile); - Script.runSimpleBashScriptForExitValue(revokeCmd); + 
Script.executeCommandForExitValue(keyToolPath, "-delete", "-alias", + sanitizeBashCommandArgument(certificateAlias), "-keystore", keyStoreFile, "-storepass", + privatePassword); } } catch (FileNotFoundException | CloudRuntimeException e) { logger.error("Error while setting up certificate " + certificateAlias, e); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupDirectDownloadCertificateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupDirectDownloadCertificateCommandWrapper.java index eb4e6be7609..fcca16ba618 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupDirectDownloadCertificateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupDirectDownloadCertificateCommandWrapper.java @@ -18,20 +18,25 @@ // package com.cloud.hypervisor.kvm.resource.wrapper; -import com.cloud.agent.api.Answer; -import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; -import com.cloud.resource.CommandWrapper; -import com.cloud.resource.ResourceWrapper; -import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificateCommand; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.lang3.StringUtils; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import 
com.cloud.utils.FileUtil; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; @ResourceWrapper(handles = SetupDirectDownloadCertificateCommand.class) public class LibvirtSetupDirectDownloadCertificateCommandWrapper extends CommandWrapper { @@ -77,9 +82,10 @@ public class LibvirtSetupDirectDownloadCertificateCommandWrapper extends Command */ private void importCertificate(String tempCerFilePath, String keyStoreFile, String certificateName, String privatePassword) { logger.debug("Importing certificate from temporary file to keystore"); - String importCommandFormat = "keytool -importcert -file %s -keystore %s -alias '%s' -storepass '%s' -noprompt"; - String importCmd = String.format(importCommandFormat, tempCerFilePath, keyStoreFile, certificateName, privatePassword); - int result = Script.runSimpleBashScriptForExitValue(importCmd); + String keyToolPath = Script.getExecutableAbsolutePath("keytool"); + int result = Script.executeCommandForExitValue(keyToolPath, "-importcert", "-file", tempCerFilePath, + "-keystore", keyStoreFile, "-alias", sanitizeBashCommandArgument(certificateName), "-storepass", + privatePassword, "-noprompt"); if (result != 0) { logger.debug("Certificate " + certificateName + " not imported as it already exist on keystore"); } @@ -92,8 +98,7 @@ public class LibvirtSetupDirectDownloadCertificateCommandWrapper extends Command String tempCerFilePath = String.format("%s/%s-%s", agentFile.getParent(), temporaryCertFilePrefix, certificateName); logger.debug("Creating temporary certificate file into: " + tempCerFilePath); - int result = Script.runSimpleBashScriptForExitValue(String.format("echo '%s' > %s", certificate, tempCerFilePath)); - if (result != 0) { + if (!FileUtil.writeToFile(tempCerFilePath, certificate)) { throw new CloudRuntimeException("Could not create the certificate file on path: " + tempCerFilePath); } return tempCerFilePath; } @@ -102,9 +107,24 @@ public 
class LibvirtSetupDirectDownloadCertificateCommandWrapper extends Command /** * Remove temporary file */ - private void cleanupTemporaryFile(String temporaryFile) { + + protected void cleanupTemporaryFile(String temporaryFile) { logger.debug("Cleaning up temporary certificate file"); - Script.runSimpleBashScript("rm -f " + temporaryFile); + if (StringUtils.isBlank(temporaryFile)) { + logger.debug("Provided temporary certificate file path is empty"); + return; + } + try { + Path filePath = Paths.get(temporaryFile); + if (!Files.exists(filePath)) { + logger.debug("Temporary certificate file does not exist: " + temporaryFile); + return; + } + Files.delete(filePath); + } catch (IOException e) { + logger.warn(String.format("Error while cleaning up temporary file: %s", temporaryFile)); + logger.debug(String.format("Error while cleaning up temporary file: %s", temporaryFile), e); + } } @Override diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java index 32d687ff98c..e0c41acea12 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java @@ -90,7 +90,7 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper { @@ -119,10 +118,7 @@ public final class LibvirtStopCommandWrapper extends CommandWrapper dpdkInterfaceMapping = command.getDpdkInterfaceMapping(); if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { for (DpdkTO to : dpdkInterfaceMapping.values()) { - String portToRemove = to.getPort(); - String cmd = String.format("ovs-vsctl del-port %s", portToRemove); - logger.debug("Removing DPDK port: " + portToRemove); - Script.runSimpleBashScript(cmd); + removeDpdkPort(to.getPort()); } } } else { diff 
--git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeBackupCommandWrapper.java new file mode 100644 index 00000000000..3c0cc53bb73 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeBackupCommandWrapper.java @@ -0,0 +1,84 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.amazonaws.util.CollectionUtils; +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; +import com.cloud.utils.script.Script; +import org.apache.cloudstack.backup.BackupAnswer; +import org.apache.cloudstack.backup.TakeBackupCommand; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +@ResourceWrapper(handles = TakeBackupCommand.class) +public class LibvirtTakeBackupCommandWrapper extends CommandWrapper { + @Override + public Answer execute(TakeBackupCommand command, LibvirtComputingResource libvirtComputingResource) { + final String vmName = command.getVmName(); + final String backupPath = command.getBackupPath(); + final String backupRepoType = command.getBackupRepoType(); + final String backupRepoAddress = command.getBackupRepoAddress(); + final String mountOptions = command.getMountOptions(); + final List diskPaths = command.getVolumePaths(); + + List commands = new ArrayList<>(); + commands.add(new String[]{ + libvirtComputingResource.getNasBackupPath(), + "-o", "backup", + "-v", vmName, + "-t", backupRepoType, + "-s", backupRepoAddress, + "-m", Objects.nonNull(mountOptions) ? mountOptions : "", + "-p", backupPath, + "-d", (Objects.nonNull(diskPaths) && !diskPaths.isEmpty()) ? 
String.join(",", diskPaths) : "" + }); + + Pair result = Script.executePipedCommands(commands, libvirtComputingResource.getCmdsTimeout()); + + if (result.first() != 0) { + logger.debug("Failed to take VM backup: " + result.second()); + return new BackupAnswer(command, false, result.second().trim()); + } + + long backupSize = 0L; + if (CollectionUtils.isNullOrEmpty(diskPaths)) { + List outputLines = Arrays.asList(result.second().trim().split("\n")); + if (!outputLines.isEmpty()) { + backupSize = Long.parseLong(outputLines.get(outputLines.size() - 1).trim()); + } + } else { + String[] outputLines = result.second().trim().split("\n"); + for(String line : outputLines) { + backupSize = backupSize + Long.parseLong(line.split(" ")[0].trim()); + } + } + + BackupAnswer answer = new BackupAnswer(command, true, result.second().trim()); + answer.setSize(backupSize); + return answer; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUnprepareStorageClientCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUnprepareStorageClientCommandWrapper.java new file mode 100644 index 00000000000..2f23a934003 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUnprepareStorageClientCommandWrapper.java @@ -0,0 +1,45 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.UnprepareStorageClientAnswer; +import com.cloud.agent.api.UnprepareStorageClientCommand; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.Pair; + +@ResourceWrapper(handles = UnprepareStorageClientCommand.class) +public class LibvirtUnprepareStorageClientCommandWrapper extends CommandWrapper { + + @Override + public Answer execute(UnprepareStorageClientCommand cmd, LibvirtComputingResource libvirtComputingResource) { + final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + Pair unprepareStorageClientResult = storagePoolMgr.unprepareStorageClient(cmd.getPoolType(), cmd.getPoolUuid()); + if (!unprepareStorageClientResult.first()) { + String msg = unprepareStorageClientResult.second(); + logger.debug("Couldn't unprepare storage client, due to: " + msg); + return new UnprepareStorageClientAnswer(cmd, false, msg); + } + return new UnprepareStorageClientAnswer(cmd, true); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java index 83636b9a9c3..0a25b89d8c5 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java @@ -16,12 +16,36 @@ // under the License. package com.cloud.hypervisor.kvm.storage; +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + import com.cloud.storage.Storage; import com.cloud.utils.exception.CloudRuntimeException; public class FiberChannelAdapter extends MultipathSCSIAdapterBase { + + private Logger LOGGER = LogManager.getLogger(getClass()); + + private String hostname = null; + private String hostnameFq = null; + public FiberChannelAdapter() { LOGGER.info("Loaded FiberChannelAdapter for StorageLayer"); + // get the hostname - we need this to compare to connid values + try { + InetAddress inetAddress = InetAddress.getLocalHost(); + hostname = inetAddress.getHostName(); // basic hostname + if (hostname.indexOf(".") > 0) { + hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain + } + hostnameFq = inetAddress.getCanonicalHostName(); // fully qualified hostname + LOGGER.info("Loaded FiberChannelAdapter for StorageLayer on host [" + hostname + "]"); + } catch (UnknownHostException e) { + LOGGER.error("Error getting hostname", e); + } } @Override @@ -76,6 +100,11 @@ public class FiberChannelAdapter extends MultipathSCSIAdapterBase { address = value; } else if (key.equals("connid")) { connectionId = value; + } else if (key.startsWith("connid.")) { + String inHostname = key.substring(7); + if (inHostname != null && (inHostname.equals(this.hostname) || inHostname.equals(this.hostnameFq))) { + connectionId = value; + } } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMPhysicalDisk.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMPhysicalDisk.java index 
c9abf399530..9d9a6415e27 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMPhysicalDisk.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMPhysicalDisk.java @@ -18,6 +18,7 @@ package com.cloud.hypervisor.kvm.storage; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuObject; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; @@ -25,8 +26,10 @@ import java.util.List; public class KVMPhysicalDisk { private String path; - private String name; - private KVMStoragePool pool; + private final String name; + private final KVMStoragePool pool; + private String dispName; + private String vmName; private boolean useAsTemplate; public static String RBDStringBuilder(String monHost, int monPort, String authUserName, String authSecret, String image) { @@ -81,7 +84,9 @@ public class KVMPhysicalDisk { @Override public String toString() { - return "KVMPhysicalDisk [path=" + path + ", name=" + name + ", pool=" + pool + ", format=" + format + ", size=" + size + ", virtualSize=" + virtualSize + "]"; + return String.format("KVMPhysicalDisk %s", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields( + this, "path", "name", "pool", "format", "size", "virtualSize", "dispName", "vmName")); } public void setFormat(PhysicalDiskFormat format) { @@ -135,4 +140,20 @@ public class KVMPhysicalDisk { public void setUseAsTemplate() { this.useAsTemplate = true; } public boolean useAsTemplate() { return this.useAsTemplate; } + + public String getDispName() { + return dispName; + } + + public void setDispName(String dispName) { + this.dispName = dispName; + } + + public String getVmName() { + return vmName; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java index 96a4da09686..674799c0bbe 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java @@ -38,6 +38,9 @@ public interface KVMStoragePool { public static final long HeartBeatUpdateRetrySleep = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_RETRY_SLEEP); public static final long HeartBeatCheckerTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_CHECKER_TIMEOUT); + public default KVMPhysicalDisk createPhysicalDisk(String volumeUuid, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, Long usableSize, byte[] passphrase) { + return createPhysicalDisk(volumeUuid, format, provisioningType, size, passphrase); + } public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java index 27f70b71ab4..3c8026c7ffd 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java @@ -45,6 +45,8 @@ import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageLayer; import com.cloud.storage.Volume; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; @@ -475,4 
+477,13 @@ public class KVMStoragePoolManager { return adaptor.createTemplateFromDirectDownloadFile(templateFilePath, destTemplatePath, destPool, format, timeout); } + public Ternary, String> prepareStorageClient(StoragePoolType type, String uuid, Map details) { + StorageAdaptor adaptor = getStorageAdaptor(type); + return adaptor.prepareStorageClient(type, uuid, details); + } + + public Pair unprepareStorageClient(StoragePoolType type, String uuid) { + StorageAdaptor adaptor = getStorageAdaptor(type); + return adaptor.unprepareStorageClient(type, uuid); + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 008768f25e9..04662604382 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -134,6 +134,10 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.utils.storage.S3.S3Utils; import com.cloud.vm.VmDetailConstants; +import org.apache.cloudstack.utils.cryptsetup.KeyFile; +import org.apache.cloudstack.utils.qemu.QemuImageOptions; +import org.apache.cloudstack.utils.qemu.QemuObject.EncryptFormat; +import java.util.ArrayList; public class KVMStorageProcessor implements StorageProcessor { protected Logger logger = LogManager.getLogger(getClass()); @@ -267,7 +271,7 @@ public class KVMStorageProcessor implements StorageProcessor { Map details = primaryStore.getDetails(); - String path = details != null ? 
details.get("managedStoreTarget") : null; + String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); @@ -327,6 +331,16 @@ public class KVMStorageProcessor implements StorageProcessor { } } + private String derivePath(PrimaryDataStoreTO primaryStore, DataTO destData, Map details) { + String path = null; + if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { + path = destData.getPath(); + } else { + path = details != null ? details.get("managedStoreTarget") : null; + } + return path; + } + // this is much like PrimaryStorageDownloadCommand, but keeping it separate. copies template direct to root disk private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final KVMStoragePool primaryPool, final String volUuid, final Long size, final int timeout) { final int index = templateUrl.lastIndexOf("/"); @@ -406,7 +420,7 @@ public class KVMStorageProcessor implements StorageProcessor { vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); } if (storagePoolMgr.supportsPhysicalDiskCopy(primaryPool.getType())) { Map details = primaryStore.getDetails(); - String path = details != null ? 
details.get("managedStoreTarget") : null; + String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); @@ -421,6 +435,7 @@ public class KVMStorageProcessor implements StorageProcessor { if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { logger.warn("Failed to connect new volume at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } + BaseVol.setDispName(template.getName()); vol = storagePoolMgr.copyPhysicalDisk(BaseVol, path != null ? path : volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds(), null, volume.getPassphrase(), volume.getProvisioningType()); @@ -513,6 +528,8 @@ public class KVMStorageProcessor implements StorageProcessor { final KVMPhysicalDisk volume = secondaryStoragePool.getPhysicalDisk(srcVolumeName); volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString())); + volume.setDispName(srcVol.getName()); + volume.setVmName(srcVol.getVmName()); final KVMPhysicalDisk newDisk = storagePoolMgr.copyPhysicalDisk(volume, path != null ? 
path : volumeName, primaryPool, cmd.getWaitInMillSeconds()); @@ -1047,7 +1064,7 @@ public class KVMStorageProcessor implements StorageProcessor { srcVolume.clearPassphrase(); if (isCreatedFromVmSnapshot) { logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot"); - } else if (primaryPool.getType() != StoragePoolType.RBD) { + } else if (primaryPool != null && primaryPool.getType() != StoragePoolType.RBD) { deleteSnapshotOnPrimary(cmd, snapshot, primaryPool); } @@ -1633,7 +1650,7 @@ public class KVMStorageProcessor implements StorageProcessor { } } else { vol = primaryPool.createPhysicalDisk(volume.getUuid(), format, - volume.getProvisioningType(), disksize, volume.getPassphrase()); + volume.getProvisioningType(), disksize, volume.getUsableSize(), volume.getPassphrase()); } final VolumeObjectTO newVol = new VolumeObjectTO(); @@ -1748,7 +1765,7 @@ public class KVMStorageProcessor implements StorageProcessor { snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName); String diskLabel = takeVolumeSnapshot(resource.getDisks(conn, vmName), snapshotName, diskPath, vm); - String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, diskPath, snapshotPath, volume, cmd.getWait()); + String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, disk, snapshotPath, volume, cmd.getWait()); mergeSnapshotIntoBaseFile(vm, diskLabel, diskPath, snapshotName, volume, conn); @@ -1817,7 +1834,7 @@ public class KVMStorageProcessor implements StorageProcessor { } } else { snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName); - String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, diskPath, snapshotPath, volume, cmd.getWait()); + String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, disk, snapshotPath, volume, cmd.getWait()); validateConvertResult(convertResult, 
snapshotPath); } } @@ -1940,26 +1957,43 @@ public class KVMStorageProcessor implements StorageProcessor { * @param snapshotPath Path to convert the base file; * @return null if the conversion occurs successfully or an error message that must be handled. */ - protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool, String baseFile, String snapshotPath, VolumeObjectTO volume, int wait) { - try { - logger.debug(String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath)); + protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool, + KVMPhysicalDisk baseFile, String snapshotPath, VolumeObjectTO volume, int wait) { + try (KeyFile srcKey = new KeyFile(volume.getPassphrase())) { + logger.debug( + String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath)); primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR); - - QemuImgFile srcFile = new QemuImgFile(baseFile); - srcFile.setFormat(PhysicalDiskFormat.QCOW2); - - QemuImgFile destFile = new QemuImgFile(snapshotPath); - destFile.setFormat(PhysicalDiskFormat.QCOW2); - - QemuImg q = new QemuImg(wait); - q.convert(srcFile, destFile); - - logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, snapshotPath)); - return null; - } catch (QemuImgException | LibvirtException ex) { - return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile, snapshotPath, ex.getMessage()); + convertTheBaseFileToSnapshot(baseFile, snapshotPath, wait, srcKey); + } catch (QemuImgException | LibvirtException | IOException ex) { + return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile, + snapshotPath, ex.getMessage()); } + + logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, + 
snapshotPath)); + return null; + } + + private void convertTheBaseFileToSnapshot(KVMPhysicalDisk baseFile, String snapshotPath, int wait, KeyFile srcKey) + throws LibvirtException, QemuImgException { + List qemuObjects = new ArrayList<>(); + Map options = new HashMap<>(); + QemuImageOptions qemuImageOpts = new QemuImageOptions(baseFile.getPath()); + if (srcKey.isSet()) { + String srcKeyName = "sec0"; + qemuObjects.add(QemuObject.prepareSecretForQemuImg(baseFile.getFormat(), EncryptFormat.LUKS, + srcKey.toString(), srcKeyName, options)); + qemuImageOpts = new QemuImageOptions(baseFile.getFormat(), baseFile.getPath(), srcKeyName); + } + QemuImgFile srcFile = new QemuImgFile(baseFile.getPath()); + srcFile.setFormat(PhysicalDiskFormat.QCOW2); + + QemuImgFile destFile = new QemuImgFile(snapshotPath); + destFile.setFormat(PhysicalDiskFormat.QCOW2); + + QemuImg q = new QemuImg(wait); + q.convert(srcFile, destFile, options, qemuObjects, qemuImageOpts, null, true); } /** @@ -2461,14 +2495,15 @@ public class KVMStorageProcessor implements StorageProcessor { } volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString())); + volume.setDispName(srcVol.getName()); + volume.setVmName(srcVol.getVmName()); String destVolumeName = null; if (destPrimaryStore.isManaged()) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); } - String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; - destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath; + destVolumeName = derivePath(destPrimaryStore, destData, destPrimaryStore.getDetails()); } else { final String volumeName = UUID.randomUUID().toString(); destVolumeName = volumeName + "." 
+ destFormat.getFileExtension(); @@ -2476,7 +2511,9 @@ public class KVMStorageProcessor implements StorageProcessor { destPool = storagePoolMgr.getStoragePool(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid()); try { - if (srcVol.getPassphrase() != null && srcVol.getVolumeType().equals(Volume.Type.ROOT)) { + Volume.Type volumeType = srcVol.getVolumeType(); + + if (srcVol.getPassphrase() != null && (Volume.Type.ROOT.equals(volumeType) || Volume.Type.DATADISK.equals(volumeType))) { volume.setQemuEncryptFormat(QemuObject.EncryptFormat.LUKS); storagePoolMgr.copyPhysicalDisk(volume, destVolumeName, destPool, cmd.getWaitInMillSeconds(), srcVol.getPassphrase(), destVol.getPassphrase(), srcVol.getProvisioningType()); } else { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index 3002fea41d3..45a65037340 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.utils.cryptsetup.KeyFile; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; @@ -34,6 +35,7 @@ import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.codec.binary.Base64; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.commons.collections.CollectionUtils; import org.libvirt.Connect; import org.libvirt.LibvirtException; import org.libvirt.Secret; @@ -273,9 +275,19 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } } - private StoragePool createNetfsStoragePool(PoolType fsType, 
Connect conn, String uuid, String host, String path) throws LibvirtException { + private void checkNetfsStoragePoolMounted(String uuid) { String targetPath = _mountPoint + File.separator + uuid; - LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(fsType, uuid, uuid, host, path, targetPath); + int mountpointResult = Script.runSimpleBashScriptForExitValue("mountpoint -q " + targetPath); + if (mountpointResult != 0) { + String errMsg = String.format("libvirt failed to mount storage pool %s at %s", uuid, targetPath); + logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + private StoragePool createNetfsStoragePool(PoolType fsType, Connect conn, String uuid, String host, String path, List nfsMountOpts) throws LibvirtException { + String targetPath = _mountPoint + File.separator + uuid; + LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(fsType, uuid, uuid, host, path, targetPath, nfsMountOpts); _storageLayer.mkdir(targetPath); StoragePool sp = null; try { @@ -364,6 +376,42 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } + private List getNFSMountOptsFromDetails(StoragePoolType type, Map details) { + List nfsMountOpts = null; + if (!type.equals(StoragePoolType.NetworkFilesystem) || details == null) { + return nfsMountOpts; + } + if (details.containsKey(ApiConstants.NFS_MOUNT_OPTIONS)) { + nfsMountOpts = Arrays.asList(details.get(ApiConstants.NFS_MOUNT_OPTIONS).replaceAll("\\s", "").split(",")); + } + return nfsMountOpts; + } + + private boolean destroyStoragePoolOnNFSMountOptionsChange(StoragePool sp, Connect conn, List nfsMountOpts) { + try { + LibvirtStoragePoolDef poolDef = getStoragePoolDef(conn, sp); + Set poolNfsMountOpts = poolDef.getNfsMountOpts(); + boolean mountOptsDiffer = false; + if (poolNfsMountOpts.size() != nfsMountOpts.size()) { + mountOptsDiffer = true; + } else { + for (String nfsMountOpt : nfsMountOpts) { + if (!poolNfsMountOpts.contains(nfsMountOpt)) { + mountOptsDiffer = true; + break; + } + } + } + 
if (mountOptsDiffer) { + sp.destroy(); + return true; + } + } catch (LibvirtException e) { + logger.error("Failure in destroying the pre-existing storage pool for changing the NFS mount options" + e); + } + return false; + } + private StoragePool createRBDStoragePool(Connect conn, String uuid, String host, int port, String userInfo, String path) { LibvirtStoragePoolDef spd; @@ -661,12 +709,21 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } catch (LibvirtException e) { logger.error("Failure in attempting to see if an existing storage pool might be using the path of the pool to be created:" + e); } + } + + List nfsMountOpts = getNFSMountOptsFromDetails(type, details); + if (sp != null && CollectionUtils.isNotEmpty(nfsMountOpts) && + destroyStoragePoolOnNFSMountOptionsChange(sp, conn, nfsMountOpts)) { + sp = null; + } + + if (sp == null) { logger.debug("Attempting to create storage pool " + name); if (type == StoragePoolType.NetworkFilesystem) { try { - sp = createNetfsStoragePool(PoolType.NETFS, conn, name, host, path); + sp = createNetfsStoragePool(PoolType.NETFS, conn, name, host, path, nfsMountOpts); } catch (LibvirtException e) { logger.error("Failed to create netfs mount: " + host + ":" + path , e); logger.error(e.getStackTrace()); @@ -674,7 +731,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } } else if (type == StoragePoolType.Gluster) { try { - sp = createNetfsStoragePool(PoolType.GLUSTERFS, conn, name, host, path); + sp = createNetfsStoragePool(PoolType.GLUSTERFS, conn, name, host, path, null); } catch (LibvirtException e) { logger.error("Failed to create glusterfs mount: " + host + ":" + path , e); logger.error(e.getStackTrace()); @@ -699,6 +756,10 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { sp.create(0); } + if (type == StoragePoolType.NetworkFilesystem) { + checkNetfsStoragePoolMounted(name); + } + return getStoragePool(name); } catch (LibvirtException e) { String error = e.toString(); @@ 
-712,6 +773,42 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } } + private boolean destroyStoragePool(Connect conn, String uuid) throws LibvirtException { + StoragePool sp; + try { + sp = conn.storagePoolLookupByUUIDString(uuid); + } catch (LibvirtException exc) { + logger.warn("Storage pool " + uuid + " doesn't exist in libvirt. Assuming it is already removed"); + logger.warn(exc.getStackTrace()); + return true; + } + + if (sp != null) { + if (sp.isPersistent() == 1) { + sp.destroy(); + sp.undefine(); + } else { + sp.destroy(); + } + sp.free(); + + return true; + } else { + logger.warn("Storage pool " + uuid + " doesn't exist in libvirt. Assuming it is already removed"); + return false; + } + } + + private boolean destroyStoragePoolHandleException(Connect conn, String uuid) + { + try { + return destroyStoragePool(conn, uuid); + } catch (LibvirtException e) { + logger.error(String.format("Failed to destroy libvirt pool %s: %s", uuid, e)); + } + return false; + } + @Override public boolean deleteStoragePool(String uuid) { logger.info("Attempting to remove storage pool " + uuid + " from libvirt"); @@ -722,16 +819,8 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { throw new CloudRuntimeException(e.toString()); } - StoragePool sp = null; Secret s = null; - try { - sp = conn.storagePoolLookupByUUIDString(uuid); - } catch (LibvirtException e) { - logger.warn("Storage pool " + uuid + " doesn't exist in libvirt. 
Assuming it is already removed"); - return true; - } - /* * Some storage pools, like RBD also have 'secret' information stored in libvirt * Destroy them if they exist @@ -743,13 +832,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } try { - if (sp.isPersistent() == 1) { - sp.destroy(); - sp.undefine(); - } else { - sp.destroy(); - } - sp.free(); + destroyStoragePool(conn, uuid); if (s != null) { s.undefine(); s.free(); @@ -762,11 +845,12 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { // handle ebusy error when pool is quickly destroyed if (e.toString().contains("exit status 16")) { String targetPath = _mountPoint + File.separator + uuid; - logger.error("deleteStoragePool removed pool from libvirt, but libvirt had trouble unmounting the pool. Trying umount location " + targetPath + - "again in a few seconds"); + logger.error("deleteStoragePool removed pool from libvirt, but libvirt had trouble unmounting the pool. Trying umount location " + targetPath + + " again in a few seconds"); String result = Script.runSimpleBashScript("sleep 5 && umount " + targetPath); if (result == null) { - logger.error("Succeeded in unmounting " + targetPath); + logger.info("Succeeded in unmounting " + targetPath); + destroyStoragePoolHandleException(conn, uuid); return true; } logger.error("Failed to unmount " + targetPath); @@ -848,7 +932,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { destFile.setFormat(format); destFile.setSize(size); Map options = new HashMap(); - if (pool.getType() == StoragePoolType.NetworkFilesystem){ + if (List.of(StoragePoolType.NetworkFilesystem, StoragePoolType.Filesystem).contains(pool.getType())) { options.put("preallocation", QemuImg.PreallocationType.getPreallocationType(provisioningType).toString()); } @@ -1325,7 +1409,10 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { */ KVMStoragePool srcPool = disk.getPool(); - PhysicalDiskFormat sourceFormat = disk.getFormat(); + /* Linstor 
images are always stored as RAW, but Linstor uses qcow2 in DB, + to support snapshots(backuped) as qcow2 files. */ + PhysicalDiskFormat sourceFormat = srcPool.getType() != StoragePoolType.Linstor ? + disk.getFormat() : PhysicalDiskFormat.RAW; String sourcePath = disk.getPath(); KVMPhysicalDisk newDisk; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 1625ecc171a..03acfcc89ad 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -21,18 +21,15 @@ import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStreamReader; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Timer; import java.util.TimerTask; -import java.util.UUID; import java.util.concurrent.TimeUnit; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; import com.cloud.storage.Storage; @@ -44,7 +41,6 @@ import com.cloud.utils.script.Script; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.libvirt.LibvirtException; import org.joda.time.Duration; public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { @@ -56,6 +52,14 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { */ static byte[] CLEANUP_LOCK = new byte[0]; + /** + * List of supported OUI's (needed for path-based cleanup logic on disconnects after live migrations) + */ + static String[] SUPPORTED_OUI_LIST 
= { + "0002ac", // HPE Primera 3PAR + "24a937" // Pure Flasharray + }; + /** * Property keys and defaults */ @@ -83,6 +87,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { * Initialize static program-wide configurations and background jobs */ static { + long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000; boolean cleanupEnabled = CLEANUP_ENABLED.getFinalValue(); @@ -97,16 +102,13 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { throw new Error("Unable to find the disconnectVolume.sh script"); } - resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); - if (resizeScript == null) { - throw new Error("Unable to find the resizeVolume.sh script"); - } - copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript); if (copyScript == null) { throw new Error("Unable to find the copyVolume.sh script"); } + resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); + if (cleanupEnabled) { cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript); if (cleanupScript == null) { @@ -138,9 +140,6 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type); - /** - * We expect WWN values in the volumePath so need to convert it to an actual physical path - */ public abstract AddressInfo parseAndValidatePath(String path); @Override @@ -152,6 +151,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { return null; } + // we expect WWN values in the volumePath so need to convert it to an actual physical path AddressInfo address = parseAndValidatePath(volumePath); return getPhysicalDisk(address, pool); } @@ -187,15 +187,23 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { if (StringUtils.isEmpty(volumePath)) { LOGGER.error("Unable to connect physical disk due 
to insufficient data - volume path is undefined"); - throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - volume path is underfined"); + return false; } if (pool == null) { LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set"); - throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data - pool is not set"); + return false; } + // we expect WWN values in the volumePath so need to convert it to an actual physical path AddressInfo address = this.parseAndValidatePath(volumePath); + + // validate we have a connection id - we can't proceed without that + if (address.getConnectionId() == null) { + LOGGER.error("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid() + " - connection id is not set in provided path"); + return false; + } + int waitTimeInSec = diskWaitTimeSecs; if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) { String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString()); @@ -208,31 +216,62 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { @Override public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) { - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid())); + if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) START", volumePath, pool.getUuid())); AddressInfo address = this.parseAndValidatePath(volumePath); + if (address.getAddress() == null) { + if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) returning FALSE, volume path has no address field", volumePath, pool.getUuid())); + return false; + } ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase()); 
- if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); return true; + + if (result.getExitCode() != 0) { + LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", address.getAddress().toLowerCase(), result.getExitCode())); + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); + } + + return (result.getExitCode() == 0); } @Override public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect); + LOGGER.debug(String.format("disconnectPhysicalDisk(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect); return false; } @Override public boolean disconnectPhysicalDiskByPath(String localPath) { - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath)); - ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", "")); - if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true; + if (localPath == null) { + return false; + } + if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) START", localPath)); + if (localPath.startsWith("/dev/mapper/")) { + String multipathName = localPath.replace("/dev/mapper/3", ""); 
+ // this ensures we only disconnect multipath devices supported by this driver + for (String oui: SUPPORTED_OUI_LIST) { + if (multipathName.length() > 1 && multipathName.substring(2).startsWith(oui)) { + ScriptResult result = runScript(disconnectScript, 60000L, multipathName); + if (result.getExitCode() != 0) { + LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", multipathName, result.getExitCode())); + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); + } + return (result.getExitCode() == 0); + } + } + } + if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) returning FALSE, volume path is not a multipath volume: %s", localPath)); + return false; } @Override public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { - LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString())); - return true; + return false; } @Override @@ -276,15 +315,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { return true; } - /** - * Validate inputs and return the source file for a template copy - * @param templateFilePath - * @param destTemplatePath - * @param destPool - * @param format - * @return - */ - File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) { + + @Override + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) { LOGGER.error("Unable to 
create template from direct download template file due to insufficient data"); throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data"); @@ -297,57 +330,18 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host"); } - if (destTemplatePath == null || destTemplatePath.isEmpty()) { - LOGGER.error("Failed to create template, target template disk path not provided"); - throw new CloudRuntimeException("Target template disk path not provided"); - } - - if (this.isStoragePoolTypeSupported(destPool.getType())) { - throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString()); - } - - if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) { - LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); - throw new CloudRuntimeException("Unsupported template format: " + format.toString()); - } - return sourceFile; - } - - String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) { - String srcTemplateFilePath = templateFilePath; - if (isTemplateExtractable(templateFilePath)) { - srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); - LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); - String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); - Script.runSimpleBashScript(extractCommand); - Script.runSimpleBashScript("rm -f " + templateFilePath); - } - return srcTemplateFilePath; - } - - QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) { - if (format == Storage.ImageFormat.RAW) { - return QemuImg.PhysicalDiskFormat.RAW; - } else if (format == Storage.ImageFormat.QCOW2) { - return QemuImg.PhysicalDiskFormat.QCOW2; 
- } else { - return QemuImg.PhysicalDiskFormat.RAW; - } - } - - @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { - File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format); - LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); - KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath()); + KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(templateFilePath); return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN); } @Override public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) { + if (StringUtils.isEmpty(name) || disk == null || destPool == null) { + LOGGER.error("Unable to copy physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); + } - validateForDiskCopy(disk, name, destPool); LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); @@ -367,60 +361,34 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat()); QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), 
destDisk.getFormat()); - LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath()); + + LOGGER.debug("Starting COPY from source path " + srcFile.getFileName() + " to target volume path: " + destDisk.getPath()); + ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); int rc = result.getExitCode(); if (rc != 0) { throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult()); } - LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult()); + LOGGER.debug("Successfully converted source volume at " + srcFile.getFileName() + " to destination volume: " + destDisk.getPath() + " " + result.getResult()); return destDisk; } - void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) { - if (StringUtils.isEmpty(name) || disk == null || destPool == null) { - LOGGER.error("Unable to copy physical disk due to insufficient data"); - throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); - } - } - - /** - * Copy a disk path to another disk path using QemuImg command - * @param disk - * @param destDisk - * @param name - * @param timeout - */ - void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) { - QemuImg qemu; - try { - qemu = new QemuImg(timeout); - } catch (LibvirtException | QemuImgException e) { - throw new CloudRuntimeException (e); - } - QemuImgFile srcFile = null; - QemuImgFile destFile = null; - - try { - srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); - destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); - - LOGGER.debug("Starting copy from source disk image " + 
srcFile.getFileName() + " to volume: " + destDisk.getPath()); - qemu.convert(srcFile, destFile, true); - LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); - } catch (QemuImgException | LibvirtException e) { - try { - Map srcInfo = qemu.info(srcFile); - LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo)); - } catch (Exception ignored) { - LOGGER.warn("Unable to get info from source disk: " + disk.getName()); - } - - String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage())); - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg, e); + private static final ScriptResult runScript(String script, long timeout, String...args) { + ScriptResult result = new ScriptResult(); + Script cmd = new Script(script, Duration.millis(timeout), LOGGER); + cmd.add(args); + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String output = cmd.execute(parser); + // its possible the process never launches which causes an NPE on getExitValue below + if (output != null && output.contains("Unable to execute the command")) { + result.setResult(output); + result.setExitCode(-1); + return result; } + result.setResult(output); + result.setExitCode(cmd.getExitValue()); + return result; } @Override @@ -461,25 +429,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { } } - private static final ScriptResult runScript(String script, long timeout, String...args) { - ScriptResult result = new ScriptResult(); - Script cmd = new Script(script, Duration.millis(timeout), LOGGER); - cmd.add(args); - OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); - String output = cmd.execute(parser); - // its possible the process never launches which causes an NPE on getExitValue below - if (output != null && 
output.contains("Unable to execute the command")) { - result.setResult(output); - result.setExitCode(-1); - return result; - } - result.setResult(output); - result.setExitCode(cmd.getExitValue()); - return result; - } - boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) { LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs"); + long scriptTimeoutSecs = 30; // how long to wait for each script execution to run long maxTries = 10; // how many max retries to attempt the script long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait @@ -557,40 +509,6 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { return false; } - void runConnectScript(String lun, AddressInfo address) { - try { - ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress()); - Process p = builder.start(); - int rc = p.waitFor(); - StringBuffer output = new StringBuffer(); - if (rc == 0) { - BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream())); - String line = null; - while ((line = input.readLine()) != null) { - output.append(line); - output.append(" "); - } - } else { - LOGGER.warn("Failure discovering LUN via " + connectScript); - BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream())); - String line = null; - while ((line = error.readLine()) != null) { - LOGGER.warn("error --> " + line); - } - } - } catch (IOException | InterruptedException e) { - throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e); - } - } - - void sleep(long sleepTimeMs) { - try { - Thread.sleep(sleepTimeMs); - } catch (Exception ex) { - // don't do anything - } - } - long getPhysicalDiskSize(String diskPath) { if (StringUtils.isEmpty(diskPath)) { return 0; diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java index 9190510c7a7..b33f49404ad 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; import org.apache.cloudstack.utils.cryptsetup.CryptSetup; import org.apache.cloudstack.utils.cryptsetup.CryptSetupException; @@ -43,6 +44,8 @@ import org.libvirt.LibvirtException; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; @@ -154,6 +157,11 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { return MapStorageUuidToStoragePool.remove(uuid) != null; } + @Override + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { + return createPhysicalDisk(name, pool, format, provisioningType, size, null, passphrase); + } + /** * ScaleIO doesn't need to communicate with the hypervisor normally to create a volume. This is used only to prepare a ScaleIO data disk for encryption. 
* Thin encrypted volumes are provisioned in QCOW2 format, which insulates the guest from zeroes/unallocated blocks in the block device that would @@ -163,11 +171,12 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { * @param format disk format * @param provisioningType provisioning type * @param size disk size + * @param usableSize usage disk size * @param passphrase passphrase * @return the disk object */ @Override - public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, Long usableSize, byte[] passphrase) { if (passphrase == null || passphrase.length == 0) { return null; } @@ -185,7 +194,12 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { QemuImg qemuImg = new QemuImg(0, true, false); Map options = new HashMap<>(); List qemuObjects = new ArrayList<>(); - long formattedSize = getUsableBytesFromRawBytes(disk.getSize()); + long formattedSize; + if (usableSize != null && usableSize > 0) { + formattedSize = usableSize; + } else { + formattedSize = getUsableBytesFromRawBytes(disk.getSize()); + } options.put("preallocation", QemuImg.PreallocationType.Metadata.toString()); qemuObjects.add(QemuObject.prepareSecretForQemuImg(disk.getFormat(), disk.getQemuEncryptFormat(), keyFile.toString(), "sec0", options)); @@ -553,6 +567,67 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor { qemu.resize(options, objects, usableSizeBytes); } + public Ternary, String> prepareStorageClient(Storage.StoragePoolType type, String uuid, Map details) { + if (!ScaleIOUtil.isSDCServiceInstalled()) { + logger.debug("SDC service not installed on host, preparing the SDC client not possible"); + return new Ternary<>(false, null, "SDC service not installed on 
host"); + } + + if (!ScaleIOUtil.isSDCServiceEnabled()) { + logger.debug("SDC service not enabled on host, enabling it"); + if (!ScaleIOUtil.enableSDCService()) { + return new Ternary<>(false, null, "SDC service not enabled on host"); + } + } + + if (!ScaleIOUtil.isSDCServiceActive()) { + if (!ScaleIOUtil.startSDCService()) { + return new Ternary<>(false, null, "Couldn't start SDC service on host"); + } + } else if (!ScaleIOUtil.restartSDCService()) { + return new Ternary<>(false, null, "Couldn't restart SDC service on host"); + } + + return new Ternary<>( true, getSDCDetails(details), "Prepared client successfully"); + } + + public Pair unprepareStorageClient(Storage.StoragePoolType type, String uuid) { + if (!ScaleIOUtil.isSDCServiceInstalled()) { + logger.debug("SDC service not installed on host, no need to unprepare the SDC client"); + return new Pair<>(true, "SDC service not installed on host, no need to unprepare the SDC client"); + } + + if (!ScaleIOUtil.isSDCServiceEnabled()) { + logger.debug("SDC service not enabled on host, no need to unprepare the SDC client"); + return new Pair<>(true, "SDC service not enabled on host, no need to unprepare the SDC client"); + } + + if (!ScaleIOUtil.stopSDCService()) { + return new Pair<>(false, "Couldn't stop SDC service on host"); + } + + return new Pair<>(true, "Unprepared SDC client successfully"); + } + + private Map getSDCDetails(Map details) { + Map sdcDetails = new HashMap(); + if (details == null || !details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)) { + return sdcDetails; + } + + String storageSystemId = details.get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); + String sdcId = ScaleIOUtil.getSdcId(storageSystemId); + if (sdcId != null) { + sdcDetails.put(ScaleIOGatewayClient.SDC_ID, sdcId); + } else { + String sdcGuId = ScaleIOUtil.getSdcGuid(); + if (sdcGuId != null) { + sdcDetails.put(ScaleIOGatewayClient.SDC_GUID, sdcGuId); + } + } + return sdcDetails; + } + /** * Calculates usable size from 
raw size, assuming qcow2 requires 192k/1GB for metadata * We also remove 128MiB for encryption/fragmentation/safety factor. diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java index 293ff29f984..77f21910da6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java @@ -72,6 +72,11 @@ public class ScaleIOStoragePool implements KVMStoragePool { } } + @Override + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, Long usableSize, byte[] passphrase) { + return this.storageAdaptor.createPhysicalDisk(volumeUuid, this, format, provisioningType, size, usableSize, passphrase); + } + @Override public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { return this.storageAdaptor.createPhysicalDisk(volumeUuid, this, format, provisioningType, size, passphrase); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java index 34bf08f4496..9a27d44ff92 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.hypervisor.kvm.storage; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -23,6 +24,8 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; public interface StorageAdaptor { @@ -41,6 +44,11 @@ public interface StorageAdaptor { public boolean deleteStoragePool(String uuid); + public default KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, + PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, Long usableSize, byte[] passphrase) { + return createPhysicalDisk(name, pool, format, provisioningType, size, passphrase); + } + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase); @@ -52,8 +60,17 @@ public interface StorageAdaptor { public boolean disconnectPhysicalDisk(Map volumeToDisconnect); - // given local path to file/device (per Libvirt XML), 1) check that device is - // handled by your adaptor, return false if not. 2) clean up device, return true + /** + * Given local path to file/device (per Libvirt XML), + * 1) Make sure to check that device is handled by your adaptor, return false if not. + * 2) clean up device, return true + * 3) if clean up fails, then return false + * + * If the method wrongly returns true, then there are chances that disconnect will not reach the right storage adapter + * + * @param localPath path for the file/device from the disk definition per Libvirt XML. + * @return true if the operation is successful; false if the operation fails or the adapter fails to handle the path. 
+ */ public boolean disconnectPhysicalDiskByPath(String localPath); public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format); @@ -100,4 +117,25 @@ public interface StorageAdaptor { default boolean supportsPhysicalDiskCopy(StoragePoolType type) { return StoragePoolType.PowerFlex == type; } + + /** + * Prepares the storage client. + * @param type type of the storage pool + * @param uuid uuid of the storage pool + * @param details any details of the storage pool that are required for client preparation + * @return status, client details, & message in case failed + */ + default Ternary, String> prepareStorageClient(StoragePoolType type, String uuid, Map details) { + return new Ternary<>(true, new HashMap<>(), ""); + } + + /** + * Unprepares the storage client. + * @param type type of the storage pool + * @param uuid uuid of the storage pool + * @return status, & message in case failed + */ + default Pair unprepareStorageClient(StoragePoolType type, String uuid) { + return new Pair<>(true, ""); + } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java index e061f1e8952..81daabf59d7 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.kvm.ha; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor; @@ -36,7 +37,6 @@ import org.apache.cloudstack.outofbandmanagement.OutOfBandManagementService; import org.joda.time.DateTime; import javax.inject.Inject; -import java.security.InvalidParameterException; public final class KVMHAProvider extends HAAbstractHostProvider implements HAProvider, Configurable { @@ -129,7 +129,7 @@ public final class 
KVMHAProvider extends HAAbstractHostProvider implements HAPro case MaxDegradedWaitTimeout: return KVMHAConfig.KvmHADegradedMaxPeriod.valueIn(clusterId); default: - throw new InvalidParameterException("Unknown HAProviderConfig " + name.toString()); + throw new InvalidParameterValueException("Unknown HAProviderConfig " + name.toString()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/SetupTfRouteCommand.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/SetupTfRouteCommand.java new file mode 100644 index 00000000000..8ccbff96d79 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/SetupTfRouteCommand.java @@ -0,0 +1,64 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.network.tungsten.agent.api; + +import com.cloud.agent.api.Command; + +import java.util.Objects; + +public class SetupTfRouteCommand extends Command { + private final String privateIp; + private final String publicIp; + private final String srcNetwork; + + public SetupTfRouteCommand(final String privateIp, final String publicIp, final String srcNetwork) { + this.privateIp = privateIp; + this.publicIp = publicIp; + this.srcNetwork = srcNetwork; + } + + public String getPrivateIp() { + return privateIp; + } + + public String getPublicIp() { + return publicIp; + } + + public String getSrcNetwork() { + return srcNetwork; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + SetupTfRouteCommand that = (SetupTfRouteCommand) o; + return Objects.equals(privateIp, that.privateIp) && Objects.equals(publicIp, that.publicIp) && Objects.equals(srcNetwork, that.srcNetwork); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), privateIp, publicIp, srcNetwork); + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/SetupTungstenVRouterCommand.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/SetupTungstenVRouterCommand.java new file mode 100644 index 00000000000..00fc522363d --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/SetupTungstenVRouterCommand.java @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.network.tungsten.agent.api; + +import com.cloud.agent.api.Command; + +import java.util.Objects; + +public class SetupTungstenVRouterCommand extends Command { + private final String oper; + private final String inf; + private final String subnet; + private final String route; + private final String vrf; + + public SetupTungstenVRouterCommand(final String oper, final String inf, final String subnet, final String route, + final String vrf) { + this.oper = oper; + this.inf = inf; + this.subnet = subnet; + this.route = route; + this.vrf = vrf; + } + + public String getOper() { + return oper; + } + + public String getInf() { + return inf; + } + + public String getSubnet() { + return subnet; + } + + public String getRoute() { + return route; + } + + public String getVrf() { + return vrf; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + SetupTungstenVRouterCommand that = (SetupTungstenVRouterCommand) o; + return Objects.equals(oper, that.oper) && Objects.equals(inf, that.inf) && Objects.equals(subnet, that.subnet) && Objects.equals(route, that.route) && Objects.equals(vrf, that.vrf); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), oper, inf, subnet, route, vrf); + } + + @Override + public 
boolean executeInSequence() { + return false; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/UpdateTungstenLoadbalancerSslCommand.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/UpdateTungstenLoadbalancerSslCommand.java new file mode 100644 index 00000000000..5ab24c18aa0 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/UpdateTungstenLoadbalancerSslCommand.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.network.tungsten.agent.api; + +import com.cloud.agent.api.Command; + +import java.util.Objects; + +public class UpdateTungstenLoadbalancerSslCommand extends Command { + private final String lbUuid; + private final String sslCertName; + private final String certificateKey; + private final String privateKey; + private final String privateIp; + private final String port; + + public UpdateTungstenLoadbalancerSslCommand(final String lbUuid, final String sslCertName, + final String certificateKey, final String privateKey, final String privateIp, final String port) { + this.lbUuid = lbUuid; + this.sslCertName = sslCertName; + this.certificateKey = certificateKey; + this.privateKey = privateKey; + this.privateIp = privateIp; + this.port = port; + } + + public String getLbUuid() { + return lbUuid; + } + + public String getSslCertName() { + return sslCertName; + } + + public String getCertificateKey() { + return certificateKey; + } + + public String getPrivateKey() { + return privateKey; + } + + public String getPrivateIp() { + return privateIp; + } + + public String getPort() { + return port; + } + + @Override + public boolean executeInSequence() { + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + UpdateTungstenLoadbalancerSslCommand that = (UpdateTungstenLoadbalancerSslCommand) o; + return Objects.equals(lbUuid, that.lbUuid) && Objects.equals(sslCertName, that.sslCertName) && Objects.equals(certificateKey, that.certificateKey) && Objects.equals(privateKey, that.privateKey) && Objects.equals(privateIp, that.privateIp) && Objects.equals(port, that.port); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), lbUuid, sslCertName, certificateKey, privateKey, privateIp, port); + } +} diff --git 
a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/UpdateTungstenLoadbalancerStatsCommand.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/UpdateTungstenLoadbalancerStatsCommand.java new file mode 100644 index 00000000000..d7b2088bcd7 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/network/tungsten/agent/api/UpdateTungstenLoadbalancerStatsCommand.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.network.tungsten.agent.api; + +import com.cloud.agent.api.Command; + +import java.util.Objects; + +public class UpdateTungstenLoadbalancerStatsCommand extends Command { + private final String lbUuid; + private final String lbStatsPort; + private final String lbStatsUri; + private final String lbStatsAuth; + + public UpdateTungstenLoadbalancerStatsCommand(final String lbUuid, final String lbStatsPort, + final String lbStatsUri, final String lbStatsAuth) { + this.lbUuid = lbUuid; + this.lbStatsPort = lbStatsPort; + this.lbStatsUri = lbStatsUri; + this.lbStatsAuth = lbStatsAuth; + } + + public String getLbUuid() { + return lbUuid; + } + + public String getLbStatsPort() { + return lbStatsPort; + } + + public String getLbStatsUri() { + return lbStatsUri; + } + + public String getLbStatsAuth() { + return lbStatsAuth; + } + + @Override + public boolean executeInSequence() { + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + UpdateTungstenLoadbalancerStatsCommand that = (UpdateTungstenLoadbalancerStatsCommand) o; + return Objects.equals(lbUuid, that.lbUuid) && Objects.equals(lbStatsPort, that.lbStatsPort) && Objects.equals(lbStatsUri, that.lbStatsUri) && Objects.equals(lbStatsAuth, that.lbStatsAuth); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), lbUuid, lbStatsPort, lbStatsUri, lbStatsAuth); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/KVMHostInfo.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/KVMHostInfo.java index 4293ee75f7e..c0b416410cb 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/KVMHostInfo.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/KVMHostInfo.java @@ -53,12 +53,14 @@ public class KVMHostInfo { 
private int allocatableCpus; private int cpusockets; private long cpuSpeed; + private String cpuArch; private long totalMemory; private long reservedMemory; private long overCommitMemory; private List capabilities = new ArrayList<>(); private static String cpuInfoFreqFileName = "/sys/devices/system/cpu/cpu0/cpufreq/base_frequency"; + private static String cpuArchCommand = "/usr/bin/arch"; public KVMHostInfo(long reservedMemory, long overCommitMemory, long manualSpeed, int reservedCpus) { this.cpuSpeed = manualSpeed; @@ -105,6 +107,10 @@ public class KVMHostInfo { return this.capabilities; } + public String getCpuArch() { + return cpuArch; + } + protected static long getCpuSpeed(final String cpabilities, final NodeInfo nodeInfo) { long speed = 0L; speed = getCpuSpeedFromCommandLscpu(); @@ -201,6 +207,7 @@ public class KVMHostInfo { this.cpusockets = hosts.sockets * hosts.nodes; } this.totalCpus = hosts.cpus; + this.cpuArch = getCPUArchFromCommand(); final LibvirtCapXMLParser parser = new LibvirtCapXMLParser(); parser.parseCapabilitiesXML(capabilities); @@ -227,4 +234,9 @@ public class KVMHostInfo { LOGGER.error("Caught libvirt exception while fetching host information", e); } } + + private String getCPUArchFromCommand() { + LOGGER.info("Fetching host CPU arch"); + return Script.runSimpleBashScript(cpuArchCommand); + } } diff --git a/plugins/hypervisors/kvm/src/main/resources/META-INF/cloudstack/kvm-compute/spring-kvm-compute-context.xml b/plugins/hypervisors/kvm/src/main/resources/META-INF/cloudstack/kvm-compute/spring-kvm-compute-context.xml index 9bcfdd9c306..bedaf1851d1 100644 --- a/plugins/hypervisors/kvm/src/main/resources/META-INF/cloudstack/kvm-compute/spring-kvm-compute-context.xml +++ b/plugins/hypervisors/kvm/src/main/resources/META-INF/cloudstack/kvm-compute/spring-kvm-compute-context.xml @@ -37,5 +37,5 @@ - + diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java 
b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index ecb34adc6ed..30d0b2ab163 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -82,6 +82,7 @@ import org.libvirt.DomainInfo.DomainState; import org.libvirt.DomainInterfaceStats; import org.libvirt.LibvirtException; import org.libvirt.MemoryStatistic; +import org.libvirt.NodeInfo; import org.libvirt.SchedUlongParameter; import org.libvirt.StorageVol; import org.libvirt.VcpuInfo; @@ -93,7 +94,6 @@ import org.mockito.MockedConstruction; import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.Spy; -import org.mockito.invocation.InvocationOnMock; import org.mockito.junit.MockitoJUnitRunner; import org.w3c.dom.Document; import org.xml.sax.SAXException; @@ -243,10 +243,18 @@ public class LibvirtComputingResourceTest { @Mock Domain domainMock; + @Mock + DomainInfo domainInfoMock; + @Mock + DomainInterfaceStats domainInterfaceStatsMock; + @Mock + DomainBlockStats domainBlockStatsMock; private final static long HYPERVISOR_LIBVIRT_VERSION_SUPPORTS_IOURING = 6003000; private final static long HYPERVISOR_QEMU_VERSION_SUPPORTS_IOURING = 5000000; + private final static String VM_NAME = "test"; + String hyperVisorType = "kvm"; Random random = new Random(); final String memInfo = "MemTotal: 5830236 kB\n" + @@ -922,86 +930,6 @@ public class LibvirtComputingResourceTest { assertEquals(uuid, oldUuid); } - private static final String VMNAME = "test"; - - @Test - public void testGetVmStat() throws LibvirtException { - final Connect connect = Mockito.mock(Connect.class); - final Domain domain = Mockito.mock(Domain.class); - final DomainInfo domainInfo = new DomainInfo(); - final MemoryStatistic[] domainMem = new MemoryStatistic[2]; - domainMem[0] = 
Mockito.mock(MemoryStatistic.class); - Mockito.when(domain.getInfo()).thenReturn(domainInfo); - Mockito.when(domain.memoryStats(20)).thenReturn(domainMem); - Mockito.when(domainMem[0].getTag()).thenReturn(4); - Mockito.when(connect.domainLookupByName(VMNAME)).thenReturn(domain); - - // this is testing the interface stats, returns an increasing number of sent and received bytes - - Mockito.when(domain.interfaceStats(nullable(String.class))).thenAnswer(new org.mockito.stubbing.Answer() { - // increment with less than a KB, so this should be less than 1 KB - final static int increment = 1000; - int rxBytes = 1000; - int txBytes = 1000; - - @Override - public DomainInterfaceStats answer(final InvocationOnMock invocation) throws Throwable { - final DomainInterfaceStats domainInterfaceStats = new DomainInterfaceStats(); - domainInterfaceStats.rx_bytes = rxBytes += increment; - domainInterfaceStats.tx_bytes = txBytes += increment; - return domainInterfaceStats; - - } - - }); - - - Mockito.when(domain.blockStats(nullable(String.class))).thenAnswer(new org.mockito.stubbing.Answer() { - // a little less than a KB - final static int increment = 1000; - - int rdBytes = 0; - int wrBytes = 1024; - - @Override - public DomainBlockStats answer(final InvocationOnMock invocation) throws Throwable { - final DomainBlockStats domainBlockStats = new DomainBlockStats(); - - domainBlockStats.rd_bytes = rdBytes += increment; - domainBlockStats.wr_bytes = wrBytes += increment; - return domainBlockStats; - } - - }); - - final LibvirtComputingResource libvirtComputingResource = new LibvirtComputingResource() { - @Override - public List getInterfaces(final Connect conn, final String vmName) { - final InterfaceDef interfaceDef = new InterfaceDef(); - return Arrays.asList(interfaceDef); - } - - @Override - public List getDisks(final Connect conn, final String vmName) { - final DiskDef diskDef = new DiskDef(); - return Arrays.asList(diskDef); - } - - }; - 
libvirtComputingResource.getVmStat(connect, VMNAME); - final VmStatsEntry vmStat = libvirtComputingResource.getVmStat(connect, VMNAME); - // network traffic as generated by the logic above, must be greater than zero - Assert.assertTrue(vmStat.getNetworkReadKBs() > 0); - Assert.assertTrue(vmStat.getNetworkWriteKBs() > 0); - // IO traffic as generated by the logic above, must be greater than zero - Assert.assertTrue(vmStat.getDiskReadKBs() > 0); - Assert.assertTrue(vmStat.getDiskWriteKBs() > 0); - // Memory limit of VM must be greater than zero - Assert.assertTrue(vmStat.getIntFreeMemoryKBs() >= 0); - Assert.assertTrue(vmStat.getMemoryKBs() >= 0); - Assert.assertTrue(vmStat.getTargetMemoryKBs() >= vmStat.getMemoryKBs()); - } - /* * New Tests */ @@ -6331,4 +6259,160 @@ public class LibvirtComputingResourceTest { Assert.assertEquals("", StringUtils.join(hostTagsList, ",")); } } + + @Test + public void getVmStatTestVmIsNullReturnsNull() throws LibvirtException { + doReturn(null).when(libvirtComputingResourceSpy).getDomain(connMock, VM_NAME); + + VmStatsEntry stat = libvirtComputingResourceSpy.getVmStat(connMock, VM_NAME); + + verify(libvirtComputingResourceSpy).getDomain(connMock, VM_NAME); + verify(libvirtComputingResourceSpy, never()).getVmCurrentStats(domainMock); + verify(libvirtComputingResourceSpy, never()).calculateVmMetrics(Mockito.any(), Mockito.any(), Mockito.any()); + Assert.assertNull(stat); + } + + @Test + public void getVmStatTestVmIsNotNullReturnsMetrics() throws LibvirtException { + doReturn(domainMock).when(libvirtComputingResourceSpy).getDomain(connMock, VM_NAME); + doReturn(Mockito.mock(LibvirtExtendedVmStatsEntry.class)).when(libvirtComputingResourceSpy).getVmCurrentStats(domainMock); + doReturn(Mockito.mock(VmStatsEntry.class)).when(libvirtComputingResourceSpy).calculateVmMetrics(Mockito.any(), Mockito.any(), Mockito.any()); + + VmStatsEntry stat = libvirtComputingResourceSpy.getVmStat(connMock, VM_NAME); + + 
verify(libvirtComputingResourceSpy).getDomain(connMock, VM_NAME); + verify(libvirtComputingResourceSpy).getVmCurrentStats(domainMock); + verify(libvirtComputingResourceSpy).calculateVmMetrics(Mockito.any(), Mockito.any(), Mockito.any()); + Assert.assertNotNull(stat); + } + + private void prepareVmInfoForGetVmCurrentStats() throws LibvirtException { + final NodeInfo nodeInfo = new NodeInfo(); + nodeInfo.cpus = 8; + nodeInfo.memory = 8 * 1024 * 1024; + nodeInfo.sockets = 2; + nodeInfo.threads = 2; + nodeInfo.model = "Foo processor"; + + Mockito.when(domainMock.getName()).thenReturn(VM_NAME); + Mockito.when(domainMock.getConnect()).thenReturn(connMock); + domainInfoMock.cpuTime = 500L; + domainInfoMock.nrVirtCpu = 4; + domainInfoMock.memory = 2048; + domainInfoMock.maxMem = 4096; + Mockito.when(domainMock.getInfo()).thenReturn(domainInfoMock); + final MemoryStatistic[] domainMem = new MemoryStatistic[2]; + domainMem[0] = Mockito.mock(MemoryStatistic.class); + doReturn(1024L).when(libvirtComputingResourceSpy).getMemoryFreeInKBs(domainMock); + + domainInterfaceStatsMock.rx_bytes = 1000L; + domainInterfaceStatsMock.tx_bytes = 2000L; + doReturn(domainInterfaceStatsMock).when(domainMock).interfaceStats(Mockito.any()); + doReturn(List.of(new InterfaceDef())).when(libvirtComputingResourceSpy).getInterfaces(connMock, VM_NAME); + + domainBlockStatsMock.rd_req = 3000L; + domainBlockStatsMock.rd_bytes = 4000L; + domainBlockStatsMock.wr_req = 5000L; + domainBlockStatsMock.wr_bytes = 6000L; + doReturn(domainBlockStatsMock).when(domainMock).blockStats(Mockito.any()); + doReturn(List.of(new DiskDef())).when(libvirtComputingResourceSpy).getDisks(connMock, VM_NAME); + } + + @Test + public void getVmCurrentStatsTestIfStatsAreAsExpected() throws LibvirtException { + prepareVmInfoForGetVmCurrentStats(); + + LibvirtExtendedVmStatsEntry vmStatsEntry = libvirtComputingResourceSpy.getVmCurrentStats(domainMock); + + Assert.assertEquals(domainInfoMock.cpuTime, vmStatsEntry.getCpuTime()); + 
Assert.assertEquals((double) domainInterfaceStatsMock.rx_bytes / 1024, vmStatsEntry.getNetworkReadKBs(), 0); + Assert.assertEquals((double) domainInterfaceStatsMock.tx_bytes / 1024, vmStatsEntry.getNetworkWriteKBs(), 0); + Assert.assertEquals(domainBlockStatsMock.rd_req, vmStatsEntry.getDiskReadIOs(), 0); + Assert.assertEquals((double) domainBlockStatsMock.rd_bytes / 1024, vmStatsEntry.getDiskReadKBs(), 0); + Assert.assertEquals(domainBlockStatsMock.wr_req, vmStatsEntry.getDiskWriteIOs(), 0); + Assert.assertEquals((double) domainBlockStatsMock.wr_bytes / 1024, vmStatsEntry.getDiskWriteKBs(), 0); + Assert.assertNotNull(vmStatsEntry.getTimestamp()); + } + + @Test + public void getVmCurrentCpuStatsTestIfStatsAreAsExpected() throws LibvirtException { + prepareVmInfoForGetVmCurrentStats(); + + LibvirtExtendedVmStatsEntry vmStatsEntry = new LibvirtExtendedVmStatsEntry(); + libvirtComputingResourceSpy.getVmCurrentCpuStats(domainMock, vmStatsEntry); + + Assert.assertEquals(domainInfoMock.cpuTime, vmStatsEntry.getCpuTime()); + } + + @Test + public void getVmCurrentNetworkStatsTestIfStatsAreAsExpected() throws LibvirtException { + prepareVmInfoForGetVmCurrentStats(); + + LibvirtExtendedVmStatsEntry vmStatsEntry = new LibvirtExtendedVmStatsEntry(); + libvirtComputingResourceSpy.getVmCurrentNetworkStats(domainMock, vmStatsEntry); + + Assert.assertEquals((double) domainInterfaceStatsMock.rx_bytes / 1024, vmStatsEntry.getNetworkReadKBs(), 0); + Assert.assertEquals((double) domainInterfaceStatsMock.tx_bytes / 1024, vmStatsEntry.getNetworkWriteKBs(), 0); + } + + @Test + public void getVmCurrentDiskStatsTestIfStatsAreAsExpected() throws LibvirtException { + prepareVmInfoForGetVmCurrentStats(); + + LibvirtExtendedVmStatsEntry vmStatsEntry = new LibvirtExtendedVmStatsEntry(); + libvirtComputingResourceSpy.getVmCurrentDiskStats(domainMock, vmStatsEntry); + + Assert.assertEquals(domainBlockStatsMock.rd_req, vmStatsEntry.getDiskReadIOs(), 0); + Assert.assertEquals((double) 
domainBlockStatsMock.rd_bytes / 1024, vmStatsEntry.getDiskReadKBs(), 0); + Assert.assertEquals(domainBlockStatsMock.wr_req, vmStatsEntry.getDiskWriteIOs(), 0); + Assert.assertEquals((double) domainBlockStatsMock.wr_bytes / 1024, vmStatsEntry.getDiskWriteKBs(), 0); + } + + @Test + public void calculateVmMetricsTestOldStatsIsNullDoesNotCalculateUtilization() throws LibvirtException { + prepareVmInfoForGetVmCurrentStats(); + + LibvirtExtendedVmStatsEntry vmStatsEntry = libvirtComputingResourceSpy.getVmCurrentStats(domainMock); + VmStatsEntry metrics = libvirtComputingResourceSpy.calculateVmMetrics(domainMock, null, vmStatsEntry); + + Assert.assertEquals(domainInfoMock.nrVirtCpu, metrics.getNumCPUs()); + Assert.assertEquals(domainInfoMock.maxMem, (long) metrics.getMemoryKBs()); + Assert.assertEquals(libvirtComputingResourceSpy.getMemoryFreeInKBs(domainMock), (long) metrics.getIntFreeMemoryKBs()); + Assert.assertEquals(domainInfoMock.memory, (long) metrics.getTargetMemoryKBs()); + Assert.assertEquals(0, metrics.getCPUUtilization(), 0); + Assert.assertEquals(0, metrics.getNetworkReadKBs(), 0); + Assert.assertEquals(0, metrics.getNetworkWriteKBs(), 0); + Assert.assertEquals(0, metrics.getDiskReadKBs(), 0); + Assert.assertEquals(0, metrics.getDiskReadIOs(), 0); + Assert.assertEquals(0, metrics.getDiskWriteKBs(), 0); + Assert.assertEquals(0, metrics.getDiskWriteIOs(), 0); + } + + @Test + public void calculateVmMetricsTestOldStatsIsNotNullCalculatesUtilization() throws LibvirtException { + prepareVmInfoForGetVmCurrentStats(); + LibvirtExtendedVmStatsEntry oldStats = libvirtComputingResourceSpy.getVmCurrentStats(domainMock); + domainInfoMock.cpuTime *= 3; + domainInterfaceStatsMock.rx_bytes *= 3; + domainInterfaceStatsMock.tx_bytes *= 3; + domainBlockStatsMock.rd_req *= 3; + domainBlockStatsMock.rd_bytes *= 3; + domainBlockStatsMock.wr_req *= 3; + domainBlockStatsMock.wr_bytes *= 3; + LibvirtExtendedVmStatsEntry newStats = 
libvirtComputingResourceSpy.getVmCurrentStats(domainMock); + + VmStatsEntry metrics = libvirtComputingResourceSpy.calculateVmMetrics(domainMock, oldStats, newStats); + + Assert.assertEquals(domainInfoMock.nrVirtCpu, metrics.getNumCPUs()); + Assert.assertEquals(domainInfoMock.maxMem, (long) metrics.getMemoryKBs()); + Assert.assertEquals(libvirtComputingResourceSpy.getMemoryFreeInKBs(domainMock), (long) metrics.getIntFreeMemoryKBs()); + Assert.assertEquals(domainInfoMock.memory, (long) metrics.getTargetMemoryKBs()); + Assert.assertTrue(metrics.getCPUUtilization() > 0); + Assert.assertEquals(newStats.getNetworkReadKBs() - oldStats.getNetworkReadKBs(), metrics.getNetworkReadKBs(), 0); + Assert.assertEquals(newStats.getNetworkWriteKBs() - oldStats.getNetworkWriteKBs(), metrics.getNetworkWriteKBs(), 0); + Assert.assertEquals(newStats.getDiskReadIOs() - oldStats.getDiskReadIOs(), metrics.getDiskReadIOs(), 0); + Assert.assertEquals(newStats.getDiskWriteIOs() - oldStats.getDiskWriteIOs(), metrics.getDiskWriteIOs(), 0); + Assert.assertEquals(newStats.getDiskReadKBs() - oldStats.getDiskReadKBs(), metrics.getDiskReadKBs(), 0); + Assert.assertEquals(newStats.getDiskWriteKBs() - oldStats.getDiskWriteKBs(), metrics.getDiskWriteKBs(), 0); + } } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDefTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDefTest.java index 51a47e9a025..712b38b0bb4 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDefTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDefTest.java @@ -19,6 +19,9 @@ package com.cloud.hypervisor.kvm.resource; +import java.util.ArrayList; +import java.util.List; + import junit.framework.TestCase; import com.cloud.hypervisor.kvm.resource.LibvirtStoragePoolDef.PoolType; import 
com.cloud.hypervisor.kvm.resource.LibvirtStoragePoolDef.AuthenticationType; @@ -47,6 +50,14 @@ public class LibvirtStoragePoolDefTest extends TestCase { assertEquals(port, pool.getSourcePort()); assertEquals(dir, pool.getSourceDir()); assertEquals(targetPath, pool.getTargetPath()); + + List nfsMountOpts = new ArrayList<>(); + nfsMountOpts.add("vers=4.1"); + nfsMountOpts.add("nconnect=4"); + pool = new LibvirtStoragePoolDef(type, name, uuid, host, dir, targetPath, nfsMountOpts); + assertTrue(pool.getNfsMountOpts().contains("vers=4.1")); + assertTrue(pool.getNfsMountOpts().contains("nconnect=4")); + assertEquals(pool.getNfsMountOpts().size(), 2); } @Test @@ -57,12 +68,38 @@ public class LibvirtStoragePoolDefTest extends TestCase { String host = "127.0.0.1"; String dir = "/export/primary"; String targetPath = "/mnt/" + uuid; + List nfsMountOpts = new ArrayList<>(); + nfsMountOpts.add("vers=4.1"); + nfsMountOpts.add("nconnect=4"); - LibvirtStoragePoolDef pool = new LibvirtStoragePoolDef(type, name, uuid, host, dir, targetPath); + LibvirtStoragePoolDef pool = new LibvirtStoragePoolDef(type, name, uuid, host, dir, targetPath, nfsMountOpts); - String expectedXml = "\n" + name + "\n" + uuid + "\n" + + String expectedXml = "\n" + + "" +name + "\n" + uuid + "\n" + "\n\n\n\n\n" + - "" + targetPath + "\n\n\n"; + "" + targetPath + "\n\n" + + "\n\n\n\n\n"; + + assertEquals(expectedXml, pool.toString()); + } + + @Test + public void testGlusterFSStoragePool() { + PoolType type = PoolType.GLUSTERFS; + String name = "myGFSPool"; + String uuid = "89a605bc-d470-4637-b3df-27388be452f5"; + String host = "127.0.0.1"; + String dir = "/export/primary"; + String targetPath = "/mnt/" + uuid; + List nfsMountOpts = new ArrayList<>(); + + LibvirtStoragePoolDef pool = new LibvirtStoragePoolDef(type, name, uuid, host, dir, targetPath, nfsMountOpts); + + String expectedXml = "\n" + + "" +name + "\n" + uuid + "\n" + + "\n\n\n" + + "\n\n\n" + + "" + targetPath + "\n\n\n"; assertEquals(expectedXml, 
pool.toString()); } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParserTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParserTest.java index 3637b7b1f9b..5854c21186f 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParserTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParserTest.java @@ -30,7 +30,7 @@ public class LibvirtStoragePoolXMLParserTest extends TestCase { @Test public void testParseNfsStoragePoolXML() { - String poolXML = "\n" + + String poolXML = "\n" + " feff06b5-84b2-3258-b5f9-1953217295de\n" + " feff06b5-84b2-3258-b5f9-1953217295de\n" + " 111111111\n" + @@ -49,12 +49,18 @@ public class LibvirtStoragePoolXMLParserTest extends TestCase { " 0\n" + " \n" + " \n" + + " \n" + + " \n" + + " \n" + + " \n" + ""; LibvirtStoragePoolXMLParser parser = new LibvirtStoragePoolXMLParser(); LibvirtStoragePoolDef pool = parser.parseStoragePoolXML(poolXML); - Assert.assertEquals("10.11.12.13", pool.getSourceHost()); + assertEquals("10.11.12.13", pool.getSourceHost()); + assertTrue(pool.getNfsMountOpts().contains("vers=4.1")); + assertTrue(pool.getNfsMountOpts().contains("nconnect=8")); } @Test diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckConvertInstanceCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckConvertInstanceCommandWrapperTest.java new file mode 100644 index 00000000000..3cad9c27a68 --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckConvertInstanceCommandWrapperTest.java @@ -0,0 +1,67 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package com.cloud.hypervisor.kvm.resource.wrapper; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.apache.commons.lang3.StringUtils; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckConvertInstanceCommand; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; + +@RunWith(MockitoJUnitRunner.class) +public class LibvirtCheckConvertInstanceCommandWrapperTest { + + @Spy + private LibvirtCheckConvertInstanceCommandWrapper checkConvertInstanceCommandWrapper = Mockito.spy(LibvirtCheckConvertInstanceCommandWrapper.class); + + @Mock + private LibvirtComputingResource libvirtComputingResourceMock; + + @Mock + CheckConvertInstanceCommand checkConvertInstanceCommandMock; + + @Before + public void setUp() { + } + + @Test + public void testCheckInstanceCommand_success() { + Mockito.when(libvirtComputingResourceMock.hostSupportsInstanceConversion()).thenReturn(true); + Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, 
libvirtComputingResourceMock); + assertTrue(answer.getResult()); + } + + @Test + public void testCheckInstanceCommand_failure() { + Mockito.when(libvirtComputingResourceMock.hostSupportsInstanceConversion()).thenReturn(false); + Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock); + assertFalse(answer.getResult()); + assertTrue(StringUtils.isNotBlank(answer.getDetails())); + } +} diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapperTest.java index d70f5f08884..f0e94e59485 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapperTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapperTest.java @@ -18,6 +18,23 @@ // package com.cloud.hypervisor.kvm.resource.wrapper; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.vm.UnmanagedInstanceTO; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.ConvertInstanceCommand; import com.cloud.agent.api.to.NfsTO; @@ -32,22 +49,6 @@ import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; import com.cloud.storage.Storage; import com.cloud.utils.Pair; import com.cloud.utils.script.Script; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import 
org.apache.cloudstack.vm.UnmanagedInstanceTO; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.MockedConstruction; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.Spy; -import org.mockito.junit.MockitoJUnitRunner; - -import java.util.Arrays; -import java.util.List; -import java.util.UUID; @RunWith(MockitoJUnitRunner.class) public class LibvirtConvertInstanceCommandWrapperTest { @@ -71,12 +72,6 @@ public class LibvirtConvertInstanceCommandWrapperTest { private static final String secondaryPoolUrl = "nfs://192.168.1.1/secondary"; private static final String vmName = "VmToImport"; - private static final String hostName = "VmwareHost1"; - private static final String vmwareVcenter = "192.168.1.2"; - private static final String vmwareDatacenter = "Datacenter"; - private static final String vmwareCluster = "Cluster"; - private static final String vmwareUsername = "administrator@vsphere.local"; - private static final String vmwarePassword = "password"; @Before public void setUp() { @@ -88,15 +83,6 @@ public class LibvirtConvertInstanceCommandWrapperTest { Mockito.when(temporaryPool.listPhysicalDisks()).thenReturn(Arrays.asList(physicalDisk1, physicalDisk2)); } - @Test - public void testIsInstanceConversionSupportedOnHost() { - try (MockedStatic + diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java index ade9e8370a8..b3f49b015d1 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java @@ -22,12 
+22,22 @@ package com.cloud.hypervisor.kvm.resource.wrapper; import static com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor.SP_LOG; import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.utils.cryptsetup.KeyFile; +import org.apache.cloudstack.utils.qemu.QemuImageOptions; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.cloudstack.utils.qemu.QemuObject; +import org.apache.cloudstack.utils.qemu.QemuObject.EncryptFormat; import org.apache.commons.io.FileUtils; import com.cloud.agent.api.storage.StorPoolBackupSnapshotCommand; @@ -57,28 +67,24 @@ public final class StorPoolBackupSnapshotCommandWrapper extends CommandWrapper qemuObjects = new ArrayList<>(); + Map options = new HashMap<>(); + QemuImageOptions qemuImageOpts = new QemuImageOptions(srcPath); + final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds()); final DataStoreTO dstDataStore = dst.getDataStore(); if (!(dstDataStore instanceof NfsTO)) { return new CopyCmdAnswer("Backup Storpool snapshot: Only NFS secondary supported at present!"); } secondaryPool = storagePoolMgr.getStoragePoolByURI(dstDataStore.getUrl()); + try (KeyFile srcKey = new KeyFile(src.getVolume().getPassphrase())) { - final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath(); - FileUtils.forceMkdir(new File(dstDir)); - - final String dstPath = dstDir + File.separator + dst.getName(); - final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.QCOW2); - - final QemuImg qemu = new QemuImg(cmd.getWaitInMillSeconds()); - qemu.convert(srcFile, dstFile); - - 
SP_LOG("StorpoolBackupSnapshotCommandWrapper srcFileFormat=%s, dstFileFormat=%s", srcFile.getFormat(), dstFile.getFormat()); - final File snapFile = new File(dstPath); - final long size = snapFile.exists() ? snapFile.length() : 0; + size = convertSnapshot(srcPath, secondaryPool, dst, srcKeyName, qemuObjects, options, qemuImageOpts, + qemu, srcKey); + } final SnapshotObjectTO snapshot = new SnapshotObjectTO(); snapshot.setPath(dst.getPath() + File.separator + dst.getName()); @@ -104,4 +110,31 @@ public final class StorPoolBackupSnapshotCommandWrapper extends CommandWrapper qemuObjects, Map options, + QemuImageOptions qemuImageOpts, final QemuImg qemu, KeyFile srcKey) throws IOException, QemuImgException { + long size; + final QemuImgFile srcFile = new QemuImgFile(srcPath, PhysicalDiskFormat.RAW); + + final String dstDir = secondaryPool.getLocalPath() + File.separator + dst.getPath(); + FileUtils.forceMkdir(new File(dstDir)); + + final String dstPath = dstDir + File.separator + dst.getName(); + final QemuImgFile dstFile = new QemuImgFile(dstPath, PhysicalDiskFormat.QCOW2); + if (srcKey.isSet()) { + qemuObjects.add(QemuObject.prepareSecretForQemuImg(PhysicalDiskFormat.RAW, EncryptFormat.LUKS, + srcKey.toString(), srcKeyName, options)); + qemuImageOpts = new QemuImageOptions(PhysicalDiskFormat.RAW, srcPath, srcKeyName); + dstFile.setFormat(PhysicalDiskFormat.LUKS); + } + + qemuImageOpts.setImageOptsFlag(true); + qemu.convert(srcFile, dstFile, options, qemuObjects, qemuImageOpts, null, true); + + SP_LOG("StorpoolBackupSnapshotCommandWrapper srcFileFormat=%s, dstFileFormat=%s", srcFile.getFormat(), dstFile.getFormat()); + final File snapFile = new File(dstPath); + size = snapFile.exists() ? 
snapFile.length() : 0; + return size; + } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolSnapshotDef.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolSnapshotDef.java new file mode 100644 index 00000000000..26004205709 --- /dev/null +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolSnapshotDef.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.cloudstack.storage.datastore.api; + +import java.io.Serializable; +import java.util.Map; + +public class StorPoolSnapshotDef implements Serializable { + private static final long serialVersionUID = 1L; + private String name; + private Integer deleteAfter; + private Map tags; + private Boolean bind; + private Integer iops; + private String rename; + private transient String volumeName; + + public StorPoolSnapshotDef(String volumeName, Integer deleteAfter, Map tags) { + super(); + this.volumeName = volumeName; + this.deleteAfter = deleteAfter; + this.tags = tags; + } + + public StorPoolSnapshotDef(String name, Integer deleteAfter, Map tags, Boolean bind, Integer iops, + String rename) { + super(); + this.name = name; + this.deleteAfter = deleteAfter; + this.tags = tags; + this.bind = bind; + this.iops = iops; + this.rename = rename; + } + + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public Integer getDeleteAfter() { + return deleteAfter; + } + public void setDeleteAfter(Integer deleteAfter) { + this.deleteAfter = deleteAfter; + } + public Map getTags() { + return tags; + } + public void setTags(Map tags) { + this.tags = tags; + } + public Boolean getBind() { + return bind; + } + public void setBind(Boolean bind) { + this.bind = bind; + } + public Integer getIops() { + return iops; + } + public void setIops(Integer iops) { + this.iops = iops; + } + public String getRename() { + return rename; + } + public void setRename(String rename) { + this.rename = rename; + } + + public String getVolumeName() { + return volumeName; + } + + public void setVolumeName(String volumeName) { + this.volumeName = volumeName; + } + +} diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolVolumeDef.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolVolumeDef.java new file mode 100644 index 
00000000000..456f5b90639 --- /dev/null +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolVolumeDef.java @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.api; + +import java.io.Serializable; +import java.util.Map; + +public class StorPoolVolumeDef implements Serializable { + + private static final long serialVersionUID = 1L; + private transient String name; + private Long size; + private Map tags; + private String parent; + private Long iops; + private String template; + private String baseOn; + private String rename; + private Boolean shrinkOk; + + public StorPoolVolumeDef() { + } + + public StorPoolVolumeDef(String name, Long size, Map tags, String parent, Long iops, String template, + String baseOn, String rename, Boolean shrinkOk) { + super(); + this.name = name; + this.size = size; + this.tags = tags; + this.parent = parent; + this.iops = iops; + this.template = template; + this.baseOn = baseOn; + this.rename = rename; + this.shrinkOk = shrinkOk; + } + + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public Long getSize() { + return 
size; + } + public void setSize(Long size) { + this.size = size; + } + public Map getTags() { + return tags; + } + public void setTags(Map tags) { + this.tags = tags; + } + public String getParent() { + return parent; + } + public void setParent(String parent) { + this.parent = parent; + } + public Long getIops() { + return iops; + } + public void setIops(Long iops) { + this.iops = iops; + } + public String getTemplate() { + return template; + } + public void setTemplate(String template) { + this.template = template; + } + public String getBaseOn() { + return baseOn; + } + public void setBaseOn(String baseOn) { + this.baseOn = baseOn; + } + public String getRename() { + return rename; + } + public void setRename(String rename) { + this.rename = rename; + } + + public Boolean getShrinkOk() { + return shrinkOk; + } + + public void setShrinkOk(Boolean shrinkOk) { + this.shrinkOk = shrinkOk; + } +} diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java index f7e643ca62b..631186636ca 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java @@ -18,6 +18,7 @@ */ package org.apache.cloudstack.storage.datastore.driver; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -31,17 +32,22 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO; +import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; +import org.apache.cloudstack.storage.datastore.api.StorPoolSnapshotDef; +import org.apache.cloudstack.storage.datastore.api.StorPoolVolumeDef; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; @@ -78,14 +84,22 @@ import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor; +import com.cloud.offering.DiskOffering; import com.cloud.server.ResourceTag; import com.cloud.server.ResourceTag.ResourceObjectType; +import com.cloud.service.ServiceOfferingDetailsVO; +import com.cloud.service.ServiceOfferingVO; +import 
com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.service.dao.ServiceOfferingDetailsDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ResizeVolumePayload; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; @@ -94,6 +108,7 @@ import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; import com.cloud.storage.dao.StoragePoolHostDao; @@ -133,9 +148,11 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { @Inject private HostDao hostDao; @Inject - private ResourceTagDao _resourceTagDao; + private ResourceTagDao resourceTagDao; @Inject - private SnapshotDetailsDao _snapshotDetailsDao; + private SnapshotDetailsDao snapshotDetailsDao; + @Inject + private SnapshotDao snapshotDao; @Inject private SnapshotDataStoreDao snapshotDataStoreDao; @Inject @@ -148,6 +165,12 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { private StoragePoolHostDao storagePoolHostDao; @Inject DataStoreManager dataStoreManager; + @Inject + private DiskOfferingDetailsDao diskOfferingDetailsDao; + @Inject + private ServiceOfferingDetailsDao serviceOfferingDetailDao; + @Inject + private ServiceOfferingDao serviceOfferingDao; private SnapshotDataStoreVO getSnapshotImageStoreRef(long snapshotId, long zoneId) { List snaps = snapshotDataStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image); @@ -251,15 +274,25 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) { String path = null; Answer answer; + 
String tier = null; + String template = null; if (data.getType() == DataObjectType.VOLUME) { try { VolumeInfo vinfo = (VolumeInfo)data; String name = vinfo.getUuid(); Long size = vinfo.getPassphraseId() == null ? vinfo.getSize() : vinfo.getSize() + 2097152; + Long vmId = vinfo.getInstanceId(); + SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao); - StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", vinfo.getName(), vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), conn.getTemplateName()); - SpApiResponse resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vinfo.getInstanceId()), null, "volume", vinfo.getMaxIops(), conn); + if (vinfo.getDiskOfferingId() != null) { + tier = getTierFromOfferingDetail(vinfo.getDiskOfferingId()); + if (tier == null) { + template = getTemplateFromOfferingDetail(vinfo.getDiskOfferingId()); + } + } + + SpApiResponse resp = createStorPoolVolume(template, tier, vinfo, name, size, vmId, conn); if (resp.getError() == null) { String volumeName = StorPoolUtil.getNameFromResponse(resp, false); path = StorPoolUtil.devPath(volumeName); @@ -290,6 +323,26 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } } + private SpApiResponse createStorPoolVolume(String template, String tier, VolumeInfo vinfo, String name, Long size, + Long vmId, SpConnectionDesc conn) { + SpApiResponse resp = new SpApiResponse(); + Map tags = StorPoolHelper.addStorPoolTags(name, getVMInstanceUUID(vmId), "volume", getVcPolicyTag(vmId), tier); + if (tier != null || template != null) { + StorPoolUtil.spLog( + "Creating volume [%s] with template [%s] or tier tags [%s] described in disk/service offerings details", + vinfo.getUuid(), template, tier); + resp = StorPoolUtil.volumeCreate(size, null, template, tags, conn); + } else { + 
StorPoolUtil.spLog( + "StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", + vinfo.getName(), vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), + vinfo.getpayload(), conn.getTemplateName()); + resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vinfo.getInstanceId()), null, + "volume", vinfo.getMaxIops(), conn); + } + return resp; + } + private void updateVolume(DataStore dataStore, String path, VolumeInfo vinfo) { VolumeVO volume = volumeDao.findById(vinfo.getId()); volume.setPoolId(dataStore.getId()); @@ -328,68 +381,111 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { public void resize(DataObject data, AsyncCompletionCallback callback) { String path = null; String err = null; - ResizeVolumeAnswer answer = null; if (data.getType() == DataObjectType.VOLUME) { VolumeObject vol = (VolumeObject)data; - StoragePool pool = (StoragePool)data.getDataStore(); - ResizeVolumePayload payload = (ResizeVolumePayload)vol.getpayload(); + path = vol.getPath(); - final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true); - final long oldSize = vol.getSize(); - Long oldMaxIops = vol.getMaxIops(); - - try { - SpConnectionDesc conn = StorPoolUtil.getSpConnection(data.getDataStore().getUuid(), data.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao); - - long maxIops = payload.newMaxIops == null ? Long.valueOf(0) : payload.newMaxIops; - - StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.resize: name=%s, uuid=%s, oldSize=%d, newSize=%s, shrinkOk=%s, maxIops=%s", name, vol.getUuid(), oldSize, payload.newSize, payload.shrinkOk, maxIops); - - SpApiResponse resp = StorPoolUtil.volumeUpdate(name, payload.newSize, payload.shrinkOk, maxIops, conn); - if (resp.getError() != null) { - err = String.format("Could not resize StorPool volume %s. 
Error: %s", name, resp.getError()); - } else { - StorPoolResizeVolumeCommand resizeCmd = new StorPoolResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), payload.newSize, payload.shrinkOk, - payload.instanceName, payload.hosts == null ? false : true); - answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, payload.hosts, resizeCmd); - - if (answer == null || !answer.getResult()) { - err = answer != null ? answer.getDetails() : "return a null answer, resize failed for unknown reason"; - } else { - path = StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false)); - - vol.setSize(payload.newSize); - vol.update(); - if (payload.newMaxIops != null) { - VolumeVO volume = volumeDao.findById(vol.getId()); - volume.setMaxIops(payload.newMaxIops); - volumeDao.update(volume.getId(), volume); - } - - updateStoragePool(vol.getPoolId(), payload.newSize - oldSize); - } - } - if (err != null) { - // try restoring volume to its initial size - resp = StorPoolUtil.volumeUpdate(name, oldSize, true, oldMaxIops, conn); - if (resp.getError() != null) { - logger.debug(String.format("Could not resize StorPool volume %s back to its original size. 
Error: %s", name, resp.getError())); - } - } - } catch (Exception e) { - logger.debug("sending resize command failed", e); - err = e.toString(); - } + err = resizeVolume(data, path, vol); } else { err = String.format("Invalid object type \"%s\" passed to resize", data.getType()); } - CreateCmdResult res = new CreateCmdResult(path, answer); + CreateCmdResult res = new CreateCmdResult(path, new Answer(null, err != null, err)); res.setResult(err); callback.complete(res); } + private String resizeVolume(DataObject data, String path, VolumeObject vol) { + String err = null; + ResizeVolumePayload payload = (ResizeVolumePayload)vol.getpayload(); + boolean needResize = vol.getSize() != payload.newSize; + + final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(path, true); + final long oldSize = vol.getSize(); + Long oldMaxIops = vol.getMaxIops(); + + try { + SpConnectionDesc conn = StorPoolUtil.getSpConnection(data.getDataStore().getUuid(), data.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao); + + err = updateStorPoolVolume(vol, payload, conn); + if (err == null && needResize) { + err = notifyQemuForTheNewSize(data, err, vol, payload); + } + + if (err != null) { + // try restoring volume to its initial size + SpApiResponse response = StorPoolUtil.volumeUpdate(name, oldSize, true, oldMaxIops, conn); + if (response.getError() != null) { + logger.debug(String.format("Could not resize StorPool volume %s back to its original size. 
Error: %s", name, response.getError())); + } + } + } catch (Exception e) { + logger.debug("sending resize command failed", e); + err = e.toString(); + } + return err; + } + + private String notifyQemuForTheNewSize(DataObject data, String err, VolumeObject vol, ResizeVolumePayload payload) + throws StorageUnavailableException { + StoragePool pool = (StoragePool)data.getDataStore(); + + StorPoolResizeVolumeCommand resizeCmd = new StorPoolResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), payload.newSize, payload.shrinkOk, + payload.instanceName, payload.hosts == null ? false : true); + ResizeVolumeAnswer answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, payload.hosts, resizeCmd); + + if (answer == null || !answer.getResult()) { + err = answer != null ? answer.getDetails() : "return a null answer, resize failed for unknown reason"; + } + return err; + } + + private String updateStorPoolVolume(VolumeObject vol, ResizeVolumePayload payload, SpConnectionDesc conn) { + String err = null; + String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true); + Long newDiskOfferingId = payload.getNewDiskOfferingId(); + String tier = null; + String template = null; + if (newDiskOfferingId != null) { + tier = getTierFromOfferingDetail(newDiskOfferingId); + if (tier == null) { + template = getTemplateFromOfferingDetail(newDiskOfferingId); + } + } + SpApiResponse resp = new SpApiResponse(); + if (tier != null || template != null) { + Map tags = StorPoolHelper.addStorPoolTags(null, null, null, null, tier); + StorPoolVolumeDef spVolume = new StorPoolVolumeDef(name, payload.newSize, tags, null, null, template, null, null, + payload.shrinkOk); + resp = StorPoolUtil.volumeUpdate(spVolume, conn); + } else { + long maxIops = payload.newMaxIops == null ? 
Long.valueOf(0) : payload.newMaxIops; + + StorPoolVolumeDef spVolume = new StorPoolVolumeDef(name, payload.newSize, null, null, maxIops, null, null, null, + payload.shrinkOk); + StorPoolUtil.spLog( + "StorpoolPrimaryDataStoreDriverImpl.resize: name=%s, uuid=%s, oldSize=%d, newSize=%s, shrinkOk=%s, maxIops=%s", + name, vol.getUuid(), vol.getSize(), payload.newSize, payload.shrinkOk, maxIops); + + resp = StorPoolUtil.volumeUpdate(spVolume, conn); + } + if (resp.getError() != null) { + err = String.format("Could not resize StorPool volume %s. Error: %s", name, resp.getError()); + } else { + updateStoragePool(vol.getPoolId(), payload.newSize - vol.getSize()); // must run before setSize() overwrites the old size, otherwise the delta is always 0 + vol.setSize(payload.newSize); + vol.update(); + if (payload.newMaxIops != null) { + VolumeVO volume = volumeDao.findById(vol.getId()); + volume.setMaxIops(payload.newMaxIops); + volumeDao.update(volume.getId(), volume); + } + + } + return err; + } + @Override public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) { String err = null; @@ -402,7 +498,7 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } try { SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao); - + tryToSnapshotVolumeBeforeDelete(vinfo, dataStore, name, conn); SpApiResponse resp = StorPoolUtil.volumeDelete(name, conn); if (resp.getError() == null) { updateStoragePool(dataStore.getId(), - vinfo.getSize()); @@ -432,6 +528,54 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { callback.complete(res); } + private void tryToSnapshotVolumeBeforeDelete(VolumeInfo vinfo, DataStore dataStore, String name, SpConnectionDesc conn) { + Integer deleteAfter = StorPoolConfigurationManager.DeleteAfterInterval.valueIn(dataStore.getId()); + if (deleteAfter != null && deleteAfter > 0 && vinfo.getPassphraseId() == null) { + createTemporarySnapshot(vinfo, name, deleteAfter, conn); + }
else { + StorPoolUtil.spLog("The volume [%s] is not marked to be snapshotted. Check the global setting `storpool.delete.after.interval` [%s] or whether the volume is encrypted [%s]", name, deleteAfter, vinfo.getPassphraseId() != null); + } + } + + private void createTemporarySnapshot(VolumeInfo vinfo, String name, Integer deleteAfter, SpConnectionDesc conn) { + Map tags = new HashMap<>(); + tags.put("cs", StorPoolUtil.DELAY_DELETE); + StorPoolSnapshotDef snapshot = new StorPoolSnapshotDef(name, deleteAfter, tags); + StorPoolUtil.spLog("Creating backup snapshot before deleting the volume [%s]", vinfo.getName()); + SpApiResponse snapshotResponse = StorPoolUtil.volumeSnapshot(snapshot, conn); + if (snapshotResponse.getError() == null) { + String snapshotName = StorPoolUtil.getSnapshotNameFromResponse(snapshotResponse, false, StorPoolUtil.GLOBAL_ID); + String snapshotPath = StorPoolUtil.devPath(snapshotName); + SnapshotVO snapshotVo = createSnapshotVo(vinfo, snapshotName); + createSnapshotOnPrimaryVo(vinfo, snapshotVo, snapshotPath); + SnapshotDetailsVO snapshotDetails = new SnapshotDetailsVO(snapshotVo.getId(), StorPoolUtil.SP_DELAY_DELETE, "~" + snapshotName, true); + snapshotDetailsDao.persist(snapshotDetails); + } + } + + private void createSnapshotOnPrimaryVo(VolumeInfo vinfo, SnapshotVO snapshotVo, String snapshotPath) { + SnapshotDataStoreVO snapshotOnPrimaryVo = new SnapshotDataStoreVO(); + snapshotOnPrimaryVo.setSnapshotId(snapshotVo.getId()); + snapshotOnPrimaryVo.setDataStoreId(vinfo.getDataCenterId()); + snapshotOnPrimaryVo.setRole(vinfo.getDataStore().getRole()); + snapshotOnPrimaryVo.setVolumeId(vinfo.getId()); + snapshotOnPrimaryVo.setSize(vinfo.getSize()); + snapshotOnPrimaryVo.setPhysicalSize(vinfo.getSize()); + snapshotOnPrimaryVo.setInstallPath(snapshotPath); + snapshotOnPrimaryVo.setState(ObjectInDataStoreStateMachine.State.Ready); + snapshotDataStoreDao.persist(snapshotOnPrimaryVo); + } + + private SnapshotVO createSnapshotVo(VolumeInfo vinfo, String snapshotName) { 
+ SnapshotVO snapshotVo = new SnapshotVO(vinfo.getDataCenterId(), vinfo.getAccountId(), vinfo.getDomainId(), vinfo.getId(), + vinfo.getDiskOfferingId(), snapshotName, + (short)Snapshot.Type.RECURRING.ordinal(), Snapshot.Type.RECURRING.name(), + vinfo.getSize(), vinfo.getMinIops(), vinfo.getMaxIops(), vinfo.getHypervisorType(), Snapshot.LocationType.PRIMARY); + snapshotVo.setState(com.cloud.storage.Snapshot.State.BackedUp); + snapshotVo = snapshotDao.persist(snapshotVo); + return snapshotVo; + } + private void logDataObject(final String pref, DataObject data) { final DataStore dstore = data.getDataStore(); String name = null; @@ -474,7 +618,7 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { try { if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.VOLUME) { SnapshotInfo sinfo = (SnapshotInfo)srcData; - final String snapshotName = StorPoolHelper.getSnapshotName(srcData.getId(), srcData.getUuid(), snapshotDataStoreDao, _snapshotDetailsDao); + final String snapshotName = StorPoolHelper.getSnapshotName(srcData.getId(), srcData.getUuid(), snapshotDataStoreDao, snapshotDetailsDao); VolumeInfo vinfo = (VolumeInfo)dstData; final String volumeName = vinfo.getUuid(); @@ -492,9 +636,12 @@ StorPoolUtil.spLog("Created volume=%s with uuid=%s from snapshot=%s with uuid=%s", StorPoolUtil.getNameFromResponse(resp, false), to.getUuid(), snapshotName, sinfo.getUuid()); } else if (resp.getError().getName().equals("objectDoesNotExist")) { //check if snapshot is on secondary storage - StorPoolUtil.spLog("Snapshot %s does not exists on StorPool, will try to create a volume from a snopshot on secondary storage", snapshotName); + StorPoolUtil.spLog("Snapshot %s does not exist on StorPool, will try to create a volume from a snapshot on secondary storage", snapshotName); SnapshotDataStoreVO snap = getSnapshotImageStoreRef(sinfo.getId(), vinfo.getDataCenterId()); - 
if (snap != null && StorPoolStorageAdaptor.getVolumeNameFromPath(snap.getInstallPath(), false) == null) { + SnapshotDetailsVO snapshotDetail = snapshotDetailsDao.findDetail(sinfo.getId(), StorPoolUtil.SP_DELAY_DELETE); + if (snapshotDetail != null) { + err = String.format("Could not create volume from snapshot due to: %s", resp.getError()); + } else if (snap != null && StorPoolStorageAdaptor.getVolumeNameFromPath(snap.getInstallPath(), false) == null) { resp = StorPoolUtil.volumeCreate(srcData.getUuid(), null, size, null, "no", "snapshot", sinfo.getBaseVolume().getMaxIops(), conn); if (resp.getError() == null) { VolumeObjectTO dstTO = (VolumeObjectTO) dstData.getTO(); @@ -515,11 +662,11 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { err = String.format("Could not freeze Storpool volume %s. Error: %s", srcData.getUuid(), resp2.getError()); } else { String name = StorPoolUtil.getNameFromResponse(resp, false); - SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(sinfo.getId(), sinfo.getUuid()); + SnapshotDetailsVO snapshotDetails = snapshotDetailsDao.findDetail(sinfo.getId(), sinfo.getUuid()); if (snapshotDetails != null) { StorPoolHelper.updateSnapshotDetailsValue(snapshotDetails.getId(), StorPoolUtil.devPath(name), "snapshot"); }else { - StorPoolHelper.addSnapshotDetails(sinfo.getId(), sinfo.getUuid(), StorPoolUtil.devPath(name), _snapshotDetailsDao); + StorPoolHelper.addSnapshotDetails(sinfo.getId(), sinfo.getUuid(), StorPoolUtil.devPath(name), snapshotDetailsDao); } resp = StorPoolUtil.volumeCreate(volumeName, StorPoolUtil.getNameFromResponse(resp, true), size, null, null, "volume", sinfo.getBaseVolume().getMaxIops(), conn); if (resp.getError() == null) { @@ -549,8 +696,10 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { err = String.format("Could not create Storpool volume %s from snapshot %s. 
Error: %s", volumeName, snapshotName, resp.getError()); } } else if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.SNAPSHOT) { + SnapshotInfo sinfo = (SnapshotInfo)srcData; + SnapshotDetailsVO snapshotDetail = snapshotDetailsDao.findDetail(sinfo.getId(), StorPoolUtil.SP_DELAY_DELETE); // bypass secondary storage - if (StorPoolConfigurationManager.BypassSecondaryStorage.value()) { + if (StorPoolConfigurationManager.BypassSecondaryStorage.value() || snapshotDetail != null) { SnapshotObjectTO snapshot = (SnapshotObjectTO) srcData.getTO(); answer = new CopyCmdAnswer(snapshot); } else { @@ -711,8 +860,30 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { } StorPoolUtil.spLog(String.format("volume size is: %d", size)); Long vmId = vinfo.getInstanceId(); - SpApiResponse resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId), getVcPolicyTag(vmId), - "volume", vinfo.getMaxIops(), conn); + + String template = null; + String tier = null; + SpApiResponse resp = new SpApiResponse(); + + if (vinfo.getDiskOfferingId() != null) { + tier = getTierFromOfferingDetail(vinfo.getDiskOfferingId()); + if (tier == null) { + template = getTemplateFromOfferingDetail(vinfo.getDiskOfferingId()); + } + } + + if (tier != null || template != null) { + Map tags = StorPoolHelper.addStorPoolTags(name, getVMInstanceUUID(vmId), "volume", getVcPolicyTag(vmId), tier); + + StorPoolUtil.spLog( + "Creating volume [%s] with template [%s] or tier tags [%s] described in disk/service offerings details", + vinfo.getUuid(), template, tier); + resp = StorPoolUtil.volumeCreate(size, parentName, template, tags, conn); + } else { + resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId), + getVcPolicyTag(vmId), "volume", vinfo.getMaxIops(), conn); + } + if (resp.getError() == null) { updateStoragePool(dstData.getDataStore().getId(), vinfo.getSize()); updateVolumePoolType(vinfo); @@ -987,9 +1158,9 @@ public class 
StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { SnapshotObjectTO snapTo = (SnapshotObjectTO)snapshot.getTO(); snapTo.setPath(StorPoolUtil.devPath(name.split("~")[1])); answer = new CreateObjectAnswer(snapTo); - StorPoolHelper.addSnapshotDetails(snapshot.getId(), snapshot.getUuid(), snapTo.getPath(), _snapshotDetailsDao); + StorPoolHelper.addSnapshotDetails(snapshot.getId(), snapshot.getUuid(), snapTo.getPath(), snapshotDetailsDao); //add primary storage of snapshot - StorPoolHelper.addSnapshotDetails(snapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(snapshot.getDataStore().getId()), _snapshotDetailsDao); + StorPoolHelper.addSnapshotDetails(snapshot.getId(), StorPoolUtil.SP_STORAGE_POOL_ID, String.valueOf(snapshot.getDataStore().getId()), snapshotDetailsDao); StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.takeSnapshot: snapshot: name=%s, uuid=%s, volume: name=%s, uuid=%s", name, snapshot.getUuid(), volumeName, vinfo.getUuid()); } } catch (Exception e) { @@ -1004,7 +1175,7 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { @Override public void revertSnapshot(final SnapshotInfo snapshot, final SnapshotInfo snapshotOnPrimaryStore, final AsyncCompletionCallback callback) { final VolumeInfo vinfo = snapshot.getBaseVolume(); - final String snapshotName = StorPoolHelper.getSnapshotName(snapshot.getId(), snapshot.getUuid(), snapshotDataStoreDao, _snapshotDetailsDao); + final String snapshotName = StorPoolHelper.getSnapshotName(snapshot.getId(), snapshot.getUuid(), snapshotDataStoreDao, snapshotDetailsDao); final String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(vinfo.getPath(), true); StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.revertSnapshot: snapshot: name=%s, uuid=%s, volume: name=%s, uuid=%s", snapshotName, snapshot.getUuid(), volumeName, vinfo.getUuid()); String err = null; @@ -1059,7 +1230,7 @@ public class StorPoolPrimaryDataStoreDriver implements 
PrimaryDataStoreDriver { } private String getVcPolicyTag(Long vmId) { - ResourceTag resourceTag = vmId != null ? _resourceTagDao.findByKey(vmId, ResourceObjectType.UserVm, StorPoolUtil.SP_VC_POLICY) : null; + ResourceTag resourceTag = vmId != null ? resourceTagDao.findByKey(vmId, ResourceObjectType.UserVm, StorPoolUtil.SP_VC_POLICY) : null; return resourceTag != null ? resourceTag.getValue() : ""; } @@ -1194,4 +1365,67 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { StorPoolUtil.spLog("The volume [%s] is detach from all clusters [%s]", volName, resp); } } + + @Override + public boolean informStorageForDiskOfferingChange() { + return true; + } + + @Override + public void updateStorageWithTheNewDiskOffering(Volume volume, DiskOffering newDiskOffering) { + if (newDiskOffering == null) { + return; + } + + StoragePoolVO pool = primaryStoreDao.findById(volume.getPoolId()); + if (pool == null) { + return; + } + + String tier = getTierFromOfferingDetail(newDiskOffering.getId()); + String template = null; + if (tier == null) { + template = getTemplateFromOfferingDetail(newDiskOffering.getId()); + } + if (tier == null && template == null) { + return; + } + SpConnectionDesc conn = StorPoolUtil.getSpConnection(pool.getUuid(), pool.getId(), storagePoolDetailsDao, primaryStoreDao); + StorPoolUtil.spLog("Updating volume [%s] with tier tag [%s] or template [%s] from Disk offering", volume.getId(), tier, template); + String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true); + Map tags = StorPoolHelper.addStorPoolTags(null, null, null, null, tier); + StorPoolVolumeDef spVolume = new StorPoolVolumeDef(volumeName, null, tags, null, null, template, null, null, null); + SpApiResponse response = StorPoolUtil.volumeUpdate(spVolume, conn); + if (response.getError() != null) { + StorPoolUtil.spLog("Could not update volume [%s] with tier tag [%s] or template [%s] from Disk offering due to [%s]", volume.getId(), tier, 
template, response.getError()); + } + } + + private String getTemplateFromOfferingDetail(Long diskOfferingId) { + String template = null; + DiskOfferingDetailVO diskOfferingDetail = diskOfferingDetailsDao.findDetail(diskOfferingId, StorPoolUtil.SP_TEMPLATE); + if (diskOfferingDetail == null ) { + ServiceOfferingVO serviceOffering = serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId, true); + if (serviceOffering != null) { + ServiceOfferingDetailsVO serviceOfferingDetail = serviceOfferingDetailDao.findDetail(serviceOffering.getId(), StorPoolUtil.SP_TEMPLATE); + if (serviceOfferingDetail != null) { + template = serviceOfferingDetail.getValue(); + } + } + } else { + template = diskOfferingDetail.getValue(); + } + return template; + } + + private String getTierFromOfferingDetail(Long diskOfferingId) { + String tier = null; + DiskOfferingDetailVO diskOfferingDetail = diskOfferingDetailsDao.findDetail(diskOfferingId, StorPoolUtil.SP_TIER); + if (diskOfferingDetail == null ) { + return tier; + } else { + tier = diskOfferingDetail.getValue(); + } + return tier; + } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java index a41ff66229c..56b150a04e6 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java @@ -28,18 +28,28 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import 
org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil; import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotDetailsDao; +import com.cloud.storage.dao.SnapshotDetailsVO; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.exception.CloudRuntimeException; import com.google.gson.JsonArray; import com.google.gson.JsonElement; import com.google.gson.JsonObject; @@ -52,6 +62,12 @@ public class StorPoolStatsCollector extends ManagerBase { private StoragePoolDetailsDao storagePoolDetailsDao; @Inject private ConfigurationDao configurationDao; + @Inject + private SnapshotDao snapshotDao; + @Inject + private SnapshotDataStoreDao snapshotDataStoreDao; + @Inject + private SnapshotDetailsDao snapshotDetailsDao; private ScheduledExecutorService executor; @@ -67,7 +83,7 @@ public class StorPoolStatsCollector extends ManagerBase { public boolean start() { List spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME); if (CollectionUtils.isNotEmpty(spPools)) { - executor = Executors.newScheduledThreadPool(2,new NamedThreadFactory("StorPoolStatsCollector")); + executor = Executors.newScheduledThreadPool(3, new NamedThreadFactory("StorPoolStatsCollector")); long storageStatsInterval = NumbersUtil.parseLong(configurationDao.getValue("storage.stats.interval"), 60000L); long 
volumeStatsInterval = NumbersUtil.parseLong(configurationDao.getValue("volume.stats.interval"), 60000L); @@ -77,6 +93,13 @@ public class StorPoolStatsCollector extends ManagerBase { if (StorPoolConfigurationManager.StorageStatsInterval.value() > 0 && storageStatsInterval > 0) { executor.scheduleAtFixedRate(new StorPoolStorageStatsMonitorTask(), 120, StorPoolConfigurationManager.StorageStatsInterval.value(), TimeUnit.SECONDS); } + for (StoragePoolVO pool: spPools) { + Integer deleteAfter = StorPoolConfigurationManager.DeleteAfterInterval.valueIn(pool.getId()); + if (deleteAfter != null && deleteAfter > 0) { + executor.scheduleAtFixedRate(new StorPoolSnapshotsWithDelayDelete(), 120, StorPoolConfigurationManager.ListSnapshotsWithDeleteAfterInterval.value(), TimeUnit.SECONDS); + break; + } + } } return true; @@ -182,4 +205,90 @@ public class StorPoolStatsCollector extends ManagerBase { } } } + + class StorPoolSnapshotsWithDelayDelete implements Runnable { + + @Override + public void run() { + List spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME); + if (CollectionUtils.isNotEmpty(spPools)) { + Map onePoolForZone = new HashMap<>(); + for (StoragePoolVO storagePoolVO : spPools) { + onePoolForZone.put(storagePoolVO.getDataCenterId(), storagePoolVO); + } + for (StoragePoolVO storagePool : onePoolForZone.values()) { + List snapshotsDetails = snapshotDetailsDao.findDetailsByZoneAndKey(storagePool.getDataCenterId(), StorPoolUtil.SP_DELAY_DELETE); + if (CollectionUtils.isEmpty(snapshotsDetails)) { + return; + } + Map snapshotsWithDelayDelete = new HashMap<>(); + + try { + logger.debug(String.format("Collecting snapshots marked to be deleted for zone [%s]", storagePool.getDataCenterId())); + JsonArray arr = StorPoolUtil.snapshotsListAllClusters(StorPoolUtil.getSpConnection(storagePool.getUuid(), + storagePool.getId(), storagePoolDetailsDao, storagePoolDao)); + snapshotsWithDelayDelete.putAll(getSnapshotsMarkedForDeletion(arr)); + 
logger.debug(String.format("Found snapshot details [%s] and snapshots on StorPool with delay delete flag [%s]", snapshotsDetails, snapshotsWithDelayDelete)); + syncSnapshots(snapshotsDetails, snapshotsWithDelayDelete); + } catch (Exception e) { + logger.debug("Could not fetch the snapshots with delay delete flag " + e.getMessage()); + } + } + } + } + + private void syncSnapshots(List snapshotsDetails, + Map snapshotsWithDelayDelete) { + for (SnapshotDetailsVO snapshotDetailsVO : snapshotsDetails) { + if (!snapshotsWithDelayDelete.containsKey(snapshotDetailsVO.getValue())) { + StorPoolUtil.spLog("The snapshot [%s] with delayDelete flag is no longer on StorPool. Removing it from CloudStack", snapshotDetailsVO.getValue()); + SnapshotDataStoreVO ss = snapshotDataStoreDao + .findBySourceSnapshot(snapshotDetailsVO.getResourceId(), DataStoreRole.Primary); + if (ss != null) { + ss.setState(State.Destroyed); + snapshotDataStoreDao.update(ss.getId(), ss); + } + SnapshotVO snap = snapshotDao.findById(snapshotDetailsVO.getResourceId()); + if (snap != null) { + snap.setState(com.cloud.storage.Snapshot.State.Destroyed); + snapshotDao.update(snap.getId(), snap); + } + snapshotDetailsDao.remove(snapshotDetailsVO.getId()); + } + } + } + + private Map getSnapshotsMarkedForDeletion(JsonArray arr) { + for (JsonElement jsonElement : arr) { + JsonObject error = jsonElement.getAsJsonObject().getAsJsonObject("error"); + if (error != null) { + throw new CloudRuntimeException(String.format("Could not collect the snapshots marked for deletion from all storage nodes due to: [%s]", error)); + } + } + Map snapshotsWithDelayDelete = new HashMap<>(); + for (JsonElement jsonElement : arr) { + JsonObject response = jsonElement.getAsJsonObject().getAsJsonObject("response"); + if (response == null) { + return snapshotsWithDelayDelete; + } + collectSnapshots(snapshotsWithDelayDelete, response); + } + logger.debug("Found snapshots on StorPool" + snapshotsWithDelayDelete); + return 
snapshotsWithDelayDelete; + } + + private void collectSnapshots(Map snapshotsWithDelayDelete, JsonObject response) { + JsonArray snapshots = response.getAsJsonObject().getAsJsonArray("data"); + for (JsonElement snapshot : snapshots) { + String name = snapshot.getAsJsonObject().get("name").getAsString(); + JsonObject tags = snapshot.getAsJsonObject().get("tags").getAsJsonObject(); + if (!StringUtils.startsWith(name, "*") && StringUtils.containsNone(name, "@") && tags != null && !tags.entrySet().isEmpty()) { + String tag = tags.getAsJsonPrimitive("cs").getAsString(); + if (tag != null && tag.equals(StorPoolUtil.DELAY_DELETE)) { + snapshotsWithDelayDelete.put(name, tag); + } + } + } + } + } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java index 339ee625c58..4f2fdef1723 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java @@ -38,8 +38,6 @@ import org.apache.cloudstack.storage.datastore.util.StorPoolUtil; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.host.HostVO; @@ -61,9 +59,7 @@ import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.utils.exception.CloudRuntimeException; -public class 
StorPoolPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { - protected Logger logger = LogManager.getLogger(getClass()); - +public class StorPoolPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { @Inject protected PrimaryDataStoreHelper dataStoreHelper; @Inject diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java index 5a84e699f52..3113ae8fdaa 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java @@ -163,11 +163,12 @@ public class StorPoolHelper { return null; } - public static Map addStorPoolTags(String name, String vmUuid, String csTag, String vcPolicy) { + public static Map addStorPoolTags(String name, String vmUuid, String csTag, String vcPolicy, String qcTier) { Map tags = new HashMap<>(); tags.put("uuid", name); tags.put("cvm", vmUuid); tags.put(StorPoolUtil.SP_VC_POLICY, vcPolicy); + tags.put("qc", qcTier); if (csTag != null) { tags.put("cs", csTag); } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java index 214f93f735f..97f4e2fe155 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java @@ -28,6 +28,9 @@ import com.google.gson.JsonElement; import com.google.gson.JsonObject; import com.google.gson.JsonParser; import com.google.gson.JsonPrimitive; + +import 
org.apache.cloudstack.storage.datastore.api.StorPoolSnapshotDef; +import org.apache.cloudstack.storage.datastore.api.StorPoolVolumeDef; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; @@ -125,6 +128,16 @@ public class StorPoolUtil { public static final String SP_VOLUME_ON_CLUSTER = "SP_VOLUME_ON_CLUSTER"; + private static final String DATA = "data"; + + private static final String CLUSTERS = "clusters"; + + public static final String SP_DELAY_DELETE = "SP_DELAY_DELETE"; + + public static final String DELAY_DELETE = "delayDelete"; + + public static final String SP_TIER = "SP_QOSCLASS"; + public static enum StorpoolRights { RO("ro"), RW("rw"), DETACH("detach"); @@ -417,27 +430,31 @@ public class StorPoolUtil { public static JsonArray snapshotsList(SpConnectionDesc conn) { SpApiResponse resp = GET("MultiCluster/SnapshotsList", conn); JsonObject obj = resp.fullJson.getAsJsonObject(); - JsonArray data = obj.getAsJsonArray("data"); - return data; + return obj.getAsJsonArray(DATA); + } + + public static JsonArray snapshotsListAllClusters(SpConnectionDesc conn) { + SpApiResponse resp = GET("MultiCluster/AllClusters/SnapshotsList", conn); + JsonObject obj = resp.fullJson.getAsJsonObject(); + return obj.getAsJsonObject(DATA).getAsJsonArray(CLUSTERS); } public static JsonArray volumesList(SpConnectionDesc conn) { SpApiResponse resp = GET("MultiCluster/VolumesList", conn); JsonObject obj = resp.fullJson.getAsJsonObject(); - JsonArray data = obj.getAsJsonArray("data"); - return data; + return obj.getAsJsonArray(DATA); } public static JsonArray volumesSpace(SpConnectionDesc conn) { SpApiResponse resp = GET("MultiCluster/AllClusters/VolumesSpace", conn); JsonObject obj = resp.fullJson.getAsJsonObject(); - return obj.getAsJsonObject("data").getAsJsonArray("clusters"); + return 
obj.getAsJsonObject(DATA).getAsJsonArray(CLUSTERS); } public static JsonArray templatesStats(SpConnectionDesc conn) { SpApiResponse resp = GET("MultiCluster/AllClusters/VolumeTemplatesStatus", conn); JsonObject obj = resp.fullJson.getAsJsonObject(); - return obj.getAsJsonObject("data").getAsJsonArray("clusters"); + return obj.getAsJsonObject(DATA).getAsJsonArray(CLUSTERS); } private static boolean objectExists(SpApiError err) { @@ -454,7 +471,7 @@ public class StorPoolUtil { if (resp.getError() != null && !objectExists(resp.getError())) { return null; } - JsonObject data = obj.getAsJsonArray("data").get(0).getAsJsonObject(); + JsonObject data = obj.getAsJsonArray(DATA).get(0).getAsJsonObject(); return data.getAsJsonPrimitive("size").getAsLong(); } @@ -462,7 +479,7 @@ public class StorPoolUtil { SpApiResponse resp = GET("MultiCluster/Snapshot/" + name, conn); JsonObject obj = resp.fullJson.getAsJsonObject(); - JsonObject data = obj.getAsJsonArray("data").get(0).getAsJsonObject(); + JsonObject data = obj.getAsJsonArray(DATA).get(0).getAsJsonObject(); JsonPrimitive clusterId = data.getAsJsonPrimitive("clusterId"); return clusterId != null ? clusterId.getAsString() : null; } @@ -471,7 +488,7 @@ public class StorPoolUtil { SpApiResponse resp = GET("MultiCluster/Volume/" + name, conn); JsonObject obj = resp.fullJson.getAsJsonObject(); - JsonObject data = obj.getAsJsonArray("data").get(0).getAsJsonObject(); + JsonObject data = obj.getAsJsonArray(DATA).get(0).getAsJsonObject(); JsonPrimitive clusterId = data.getAsJsonPrimitive("clusterId"); return clusterId != null ? 
clusterId.getAsString() : null; } @@ -484,7 +501,19 @@ public class StorPoolUtil { json.put("parent", parentName); json.put("size", size); json.put("template", conn.getTemplateName()); - Map tags = StorPoolHelper.addStorPoolTags(name, vmUuid, csTag, vcPolicy); + Map tags = StorPoolHelper.addStorPoolTags(name, vmUuid, csTag, vcPolicy, null); + json.put("tags", tags); + return POST("MultiCluster/VolumeCreate", json, conn); + } + + public static SpApiResponse volumeCreate(Long size, String parentName, String template, Map tags, SpConnectionDesc conn) { + template = template != null ? template : conn.getTemplateName(); + + Map json = new LinkedHashMap<>(); + json.put("name", ""); + json.put("parent", parentName); + json.put("size", size); + json.put("template", template); json.put("tags", tags); return POST("MultiCluster/VolumeCreate", json, conn); } @@ -508,7 +537,7 @@ public class StorPoolUtil { json.put("iops", iops); } json.put("template", conn.getTemplateName()); - Map tags = StorPoolHelper.addStorPoolTags(name, cvmTag, csTag, vcPolicyTag); + Map tags = StorPoolHelper.addStorPoolTags(name, cvmTag, csTag, vcPolicyTag, null); json.put("tags", tags); return POST("MultiCluster/VolumeCreate", json, conn); } @@ -536,7 +565,7 @@ public class StorPoolUtil { public static SpApiResponse volumeRemoveTags(String name, SpConnectionDesc conn) { Map json = new HashMap<>(); - Map tags = StorPoolHelper.addStorPoolTags(null, "", null, ""); + Map tags = StorPoolHelper.addStorPoolTags(null, "", null, "", null); json.put("tags", tags); return POST("MultiCluster/VolumeUpdate/" + name, json, conn); } @@ -544,7 +573,7 @@ public class StorPoolUtil { public static SpApiResponse volumeUpdateIopsAndTags(final String name, final String uuid, Long iops, SpConnectionDesc conn, String vcPolicy) { Map json = new HashMap<>(); - Map tags = StorPoolHelper.addStorPoolTags(null, uuid, null, vcPolicy); + Map tags = StorPoolHelper.addStorPoolTags(null, uuid, null, vcPolicy, null); json.put("iops", 
iops); json.put("tags", tags); return POST("MultiCluster/VolumeUpdate/" + name, json, conn); @@ -552,14 +581,14 @@ public class StorPoolUtil { public static SpApiResponse volumeUpdateCvmTags(final String name, final String uuid, SpConnectionDesc conn) { Map json = new HashMap<>(); - Map tags = StorPoolHelper.addStorPoolTags(null, uuid, null, null); + Map tags = StorPoolHelper.addStorPoolTags(null, uuid, null, null, null); json.put("tags", tags); return POST("MultiCluster/VolumeUpdate/" + name, json, conn); } public static SpApiResponse volumeUpdateVCTags(final String name, SpConnectionDesc conn, String vcPolicy) { Map json = new HashMap<>(); - Map tags = StorPoolHelper.addStorPoolTags(null, null, null, vcPolicy); + Map tags = StorPoolHelper.addStorPoolTags(null, null, null, vcPolicy, null); json.put("tags", tags); return POST("MultiCluster/VolumeUpdate/" + name, json, conn); } @@ -570,20 +599,28 @@ public class StorPoolUtil { return POST("MultiCluster/VolumeUpdate/" + name, json, conn); } + public static SpApiResponse volumeUpdate(StorPoolVolumeDef volume, SpConnectionDesc conn) { + return POST("MultiCluster/VolumeUpdate/" + volume.getName(), volume, conn); + } + public static SpApiResponse volumeSnapshot(final String volumeName, final String snapshotName, String vmUuid, String csTag, String vcPolicy, SpConnectionDesc conn) { Map json = new HashMap<>(); - Map tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, vcPolicy); + Map tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, vcPolicy, null); json.put("name", ""); json.put("tags", tags); return POST("MultiCluster/VolumeSnapshot/" + volumeName, json, conn); } + public static SpApiResponse volumeSnapshot(StorPoolSnapshotDef snapshot, SpConnectionDesc conn) { + return POST("MultiCluster/VolumeSnapshot/" + snapshot.getVolumeName(), snapshot, conn); + } + public static SpApiResponse volumesGroupSnapshot(final List volumeTOs, final String vmUuid, final String snapshotName, String csTag, 
SpConnectionDesc conn) { Map json = new LinkedHashMap<>(); - Map tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, null); + Map tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, null, null); List> volumes = new ArrayList<>(); for (VolumeObjectTO volumeTO : volumeTOs) { Map vol = new LinkedHashMap<>(); @@ -639,7 +676,7 @@ public class StorPoolUtil { public static String getSnapshotNameFromResponse(SpApiResponse resp, boolean tildeNeeded, String globalIdOrRemote) { JsonObject obj = resp.fullJson.getAsJsonObject(); - JsonPrimitive data = obj.getAsJsonObject("data").getAsJsonPrimitive(globalIdOrRemote); + JsonPrimitive data = obj.getAsJsonObject(DATA).getAsJsonPrimitive(globalIdOrRemote); String name = data != null ? data.getAsString() : null; name = name != null ? !tildeNeeded ? name : "~" + name : name; return name; @@ -647,7 +684,7 @@ public class StorPoolUtil { public static String getNameFromResponse(SpApiResponse resp, boolean tildeNeeded) { JsonObject obj = resp.fullJson.getAsJsonObject(); - JsonPrimitive data = obj.getAsJsonObject("data").getAsJsonPrimitive("name"); + JsonPrimitive data = obj.getAsJsonObject(DATA).getAsJsonPrimitive("name"); String name = data != null ? data.getAsString() : null; name = name != null ? name.startsWith("~") && !tildeNeeded ? 
name.split("~")[1] : name : name; return name; diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java index bd5380cc160..41e9676bb11 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java @@ -149,18 +149,21 @@ public class StorPoolDataMotionStrategy implements DataMotionStrategy { public StrategyPriority canHandle(DataObject srcData, DataObject destData) { DataObjectType srcType = srcData.getType(); DataObjectType dstType = destData.getType(); - if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.TEMPLATE - && StorPoolConfigurationManager.BypassSecondaryStorage.value()) { + if (srcType == DataObjectType.SNAPSHOT && dstType == DataObjectType.TEMPLATE) { SnapshotInfo sinfo = (SnapshotInfo) srcData; VolumeInfo volume = sinfo.getBaseVolume(); StoragePoolVO storagePool = _storagePool.findById(volume.getPoolId()); if (!storagePool.getStorageProviderName().equals(StorPoolUtil.SP_PROVIDER_NAME)) { return StrategyPriority.CANT_HANDLE; } + SnapshotDetailsVO snapshotDetail = _snapshotDetailsDao.findDetail(sinfo.getId(), StorPoolUtil.SP_DELAY_DELETE); + if (snapshotDetail != null) { + throw new CloudRuntimeException("Cannot create a template from the last snapshot of deleted volume. 
You can only restore the volume."); + } String snapshotName = StorPoolHelper.getSnapshotName(sinfo.getId(), sinfo.getUuid(), _snapshotStoreDao, _snapshotDetailsDao); StorPoolUtil.spLog("StorPoolDataMotionStrategy.canHandle snapshot name=%s", snapshotName); - if (snapshotName != null) { + if (snapshotName != null && StorPoolConfigurationManager.BypassSecondaryStorage.value()) { return StrategyPriority.HIGHEST; } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java index dcb2b226467..e4e930c8dee 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolConfigurationManager.java @@ -44,6 +44,16 @@ public class StorPoolConfigurationManager implements Configurable { "The interval in seconds to get StorPool template statistics", false); + public static final ConfigKey DeleteAfterInterval = new ConfigKey<>("Advanced", Integer.class, + "storpool.delete.after.interval", "0", + "The interval (in seconds) after the StorPool snapshot will be deleted", + false, ConfigKey.Scope.StoragePool); + + public static final ConfigKey ListSnapshotsWithDeleteAfterInterval = new ConfigKey<>("Advanced", Integer.class, + "storpool.list.snapshots.delete.after.interval", "360", + "The interval (in seconds) to fetch the StorPool snapshots with deleteAfter flag", + false); + @Override public String getConfigComponentName() { return StorPoolConfigurationManager.class.getSimpleName(); @@ -51,6 +61,6 @@ public class StorPoolConfigurationManager implements Configurable { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, 
AlternativeEndpoint, VolumesStatsInterval, StorageStatsInterval }; + return new ConfigKey[] { BypassSecondaryStorage, StorPoolClusterId, AlternativeEndPointEnabled, AlternativeEndpoint, VolumesStatsInterval, StorageStatsInterval, DeleteAfterInterval, ListSnapshotsWithDeleteAfterInterval }; } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java index 0b58247c661..5cdb7b8cda1 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java @@ -117,6 +117,8 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { if (resp.getError() != null) { final String err = String.format("Failed to clean-up Storpool snapshot %s. 
Error: %s", name, resp.getError()); StorPoolUtil.spLog(err); + markSnapshotAsDestroyedIfAlreadyRemoved(snapshotId, resp); + throw new CloudRuntimeException(err); } else { res = deleteSnapshotFromDbIfNeeded(snapshotVO, zoneId); StorPoolUtil.spLog("StorpoolSnapshotStrategy.deleteSnapshot: executed successfully=%s, snapshot uuid=%s, name=%s", res, snapshotVO.getUuid(), name); @@ -130,6 +132,16 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { return res; } + private void markSnapshotAsDestroyedIfAlreadyRemoved(Long snapshotId, SpApiResponse resp) { + if (resp.getError().getName().equals("objectDoesNotExist")) { + SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findBySourceSnapshot(snapshotId, DataStoreRole.Primary); + if (snapshotOnPrimary != null) { + snapshotOnPrimary.setState(State.Destroyed); + _snapshotStoreDao.update(snapshotOnPrimary.getId(), snapshotOnPrimary); + } + } + } + @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { logger.debug(String.format("StorpoolSnapshotStrategy.canHandle: snapshot=%s, uuid=%s, op=%s", snapshot.getName(), snapshot.getUuid(), op)); @@ -167,7 +179,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { boolean resultIsSet = false; try { while (snapshot != null && - (snapshot.getState() == Snapshot.State.Destroying || snapshot.getState() == Snapshot.State.Destroyed || snapshot.getState() == Snapshot.State.Error)) { + (snapshot.getState() == Snapshot.State.Destroying || snapshot.getState() == Snapshot.State.Destroyed || snapshot.getState() == Snapshot.State.Error || snapshot.getState() == Snapshot.State.BackedUp)) { SnapshotInfo child = snapshot.getChild(); if (child != null) { @@ -331,9 +343,21 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy { } else { snapshotZoneDao.removeSnapshotFromZones(snapshotVO.getId()); } + if (CollectionUtils.isNotEmpty(retrieveSnapshotEntries(snapshotId, null))) { + return true; + } + 
updateSnapshotToDestroyed(snapshotVO); return true; } + private List retrieveSnapshotEntries(long snapshotId, Long zoneId) { + return snapshotDataFactory.getSnapshots(snapshotId, zoneId); + } + + private void updateSnapshotToDestroyed(SnapshotVO snapshotVo) { + snapshotVo.setState(Snapshot.State.Destroyed); + _snapshotDao.update(snapshotVo.getId(), snapshotVo); + } @Override public SnapshotInfo takeSnapshot(SnapshotInfo snapshot) { diff --git a/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml b/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml index 6451fc8fd39..b4e81f166f0 100644 --- a/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml +++ b/plugins/storage/volume/storpool/src/main/resources/META-INF/cloudstack/storage-volume-storpool/spring-storage-volume-storpool-context.xml @@ -1,12 +1,12 @@ - 2.0.0.AM25 - 1.5 1.3.2 1.1.3 1.1-groovy-2.4 @@ -41,18 +40,21 @@ - org.codehaus.gmaven - gmaven-plugin - ${gmaven.version} - - 1.7 - - + org.codehaus.gmavenplus + gmavenplus-plugin + ${cs.gmavenplus.version} + + + org.codehaus.groovy + groovy-all + ${cs.groovy.version} + + compile - testCompile + compileTests @@ -66,29 +68,12 @@ - - - org.codehaus.gmaven.runtime - gmaven-runtime-1.7 - ${gmaven.version} - - - org.codehaus.groovy - groovy-all - - - - - org.codehaus.groovy - groovy-all - ${cs.groovy.version} - - org.apache.maven.plugins maven-surefire-plugin + @{argLine} --add-exports=java.naming/com.sun.jndi.ldap=ALL-UNNAMED **/*Spec.groovy **/*Test.java diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java index 7e2114ea00f..6219fc90f81 100644 
--- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.LinkAccountToLdapResponse; import org.apache.cloudstack.api.response.LinkDomainToLdapResponse; +import org.apache.cloudstack.api.response.RoleResponse; import org.apache.cloudstack.ldap.LdapManager; import org.apache.cloudstack.ldap.LdapUser; import org.apache.cloudstack.ldap.NoLdapUserMatchingQueryException; @@ -61,9 +62,12 @@ public class LinkAccountToLdapCmd extends BaseCmd { @Parameter(name = ApiConstants.ADMIN, type = CommandType.STRING, required = false, description = "domain admin username in LDAP ") private String admin; - @Parameter(name = ApiConstants.ACCOUNT_TYPE, type = CommandType.INTEGER, required = true, description = "Type of the account to auto import. Specify 0 for user and 2 for " + @Parameter(name = ApiConstants.ACCOUNT_TYPE, type = CommandType.INTEGER, required = false, description = "Type of the account to auto import. 
Specify 0 for user and 2 for " + "domain admin") - private int accountType; + private Integer accountType; + + @Parameter(name = ApiConstants.ROLE_ID, type = CommandType.UUID, entityType = RoleResponse.class, required = false, description = "Creates the account under the specified role.", since="4.19.1") + private Long roleId; @Inject private LdapManager _ldapManager; @@ -132,7 +136,14 @@ public class LinkAccountToLdapCmd extends BaseCmd { } public Account.Type getAccountType() { - return Account.Type.getFromValue(accountType); + if (accountType == null) { + return RoleType.getAccountTypeByRole(roleService.findRole(roleId), null); + } + return RoleType.getAccountTypeByRole(roleService.findRole(roleId), Account.Type.getFromValue(accountType.intValue())); + } + + public Long getRoleId() { + return RoleType.getRoleByAccountType(roleId, getAccountType()); } @Override diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapManagerImpl.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapManagerImpl.java index 68f5580ed1b..16914e792a6 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapManagerImpl.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapManagerImpl.java @@ -449,11 +449,12 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag Validate.notEmpty(cmd.getLdapDomain(), "ldapDomain cannot be empty, please supply a GROUP or OU name"); Validate.notNull(cmd.getType(), "type cannot be null. 
It should either be GROUP or OU"); Validate.notEmpty(cmd.getLdapDomain(), "GROUP or OU name cannot be empty"); + Validate.isTrue(cmd.getAccountType() != null || cmd.getRoleId() != null, "Either account type or role ID must be given"); LinkType linkType = LdapManager.LinkType.valueOf(cmd.getType().toUpperCase()); Account account = accountDao.findActiveAccount(cmd.getAccountName(),cmd.getDomainId()); if (account == null) { - account = new AccountVO(cmd.getAccountName(), cmd.getDomainId(), null, cmd.getAccountType(), UUID.randomUUID().toString()); + account = new AccountVO(cmd.getAccountName(), cmd.getDomainId(), null, cmd.getAccountType(), cmd.getRoleId(), UUID.randomUUID().toString()); accountDao.persist((AccountVO)account); } diff --git a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/LdapTestConfigTool.java b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/LdapTestConfigTool.java index 47f201de479..4197bad4f2d 100644 --- a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/LdapTestConfigTool.java +++ b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/LdapTestConfigTool.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.ldap; import org.apache.cloudstack.framework.config.ConfigKey; import java.lang.reflect.Field; -import java.lang.reflect.Modifier; public class LdapTestConfigTool { public LdapTestConfigTool() { @@ -31,18 +30,12 @@ public class LdapTestConfigTool { ConfigKey key = (ConfigKey)configKey.get(ldapConfiguration); - Field modifiersField = Field.class.getDeclaredField("modifiers"); - modifiersField.setAccessible(true); - modifiersField.setInt(configKey, configKey.getModifiers() & ~Modifier.FINAL); - Field f = ConfigKey.class.getDeclaredField("_value"); f.setAccessible(true); - modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL); f.set(key, o); Field dynamic = ConfigKey.class.getDeclaredField("_isDynamic"); dynamic.setAccessible(true); - 
modifiersField.setInt(dynamic, dynamic.getModifiers() & ~Modifier.FINAL); dynamic.setBoolean(key, false); } } diff --git a/plugins/user-authenticators/ldap/src/test/resources/log4j.xml b/plugins/user-authenticators/ldap/src/test/resources/log4j.xml index c369c454640..6c0ffa264e1 100755 --- a/plugins/user-authenticators/ldap/src/test/resources/log4j.xml +++ b/plugins/user-authenticators/ldap/src/test/resources/log4j.xml @@ -32,7 +32,7 @@ - + diff --git a/plugins/user-authenticators/md5/src/main/resources/META-INF/cloudstack/md5/spring-md5-context.xml b/plugins/user-authenticators/md5/src/main/resources/META-INF/cloudstack/md5/spring-md5-context.xml index 132f1481bb6..a447e731743 100644 --- a/plugins/user-authenticators/md5/src/main/resources/META-INF/cloudstack/md5/spring-md5-context.xml +++ b/plugins/user-authenticators/md5/src/main/resources/META-INF/cloudstack/md5/spring-md5-context.xml @@ -30,5 +30,5 @@ - + diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java index 6d7123ebe8e..b65027d6a24 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java @@ -136,9 +136,9 @@ public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthMana public OauthProviderVO registerOauthProvider(RegisterOAuthProviderCmd cmd) { String description = cmd.getDescription(); String provider = cmd.getProvider(); - String clientId = cmd.getClientId(); - String redirectUri = cmd.getRedirectUri(); - String secretKey = cmd.getSecretKey(); + String clientId = StringUtils.trim(cmd.getClientId()); + String redirectUri = StringUtils.trim(cmd.getRedirectUri()); + String secretKey = StringUtils.trim(cmd.getSecretKey()); if (!isOAuthPluginEnabled()) 
{ throw new CloudRuntimeException("OAuth is not enabled, please enable to register"); @@ -168,9 +168,9 @@ public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthMana public OauthProviderVO updateOauthProvider(UpdateOAuthProviderCmd cmd) { Long id = cmd.getId(); String description = cmd.getDescription(); - String clientId = cmd.getClientId(); - String redirectUri = cmd.getRedirectUri(); - String secretKey = cmd.getSecretKey(); + String clientId = StringUtils.trim(cmd.getClientId()); + String redirectUri = StringUtils.trim(cmd.getRedirectUri()); + String secretKey = StringUtils.trim(cmd.getSecretKey()); Boolean enabled = cmd.getEnabled(); OauthProviderVO providerVO = _oauthProviderDao.findById(id); diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java index 1f38adfd63b..dde50c8bb34 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java @@ -31,15 +31,17 @@ import org.apache.cloudstack.auth.UserOAuth2Authenticator; import javax.inject.Inject; import java.util.Map; +import static org.apache.cloudstack.oauth2.OAuth2AuthManager.OAuth2IsPluginEnabled; + public class OAuth2UserAuthenticator extends AdapterBase implements UserAuthenticator { @Inject - private UserAccountDao _userAccountDao; + private UserAccountDao userAccountDao; @Inject - private UserDao _userDao; + private UserDao userDao; @Inject - private OAuth2AuthManager _userOAuth2mgr; + private OAuth2AuthManager userOAuth2mgr; @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { @@ -47,20 +49,33 @@ public class OAuth2UserAuthenticator extends AdapterBase implements UserAuthenti 
logger.debug("Trying OAuth2 auth for user: " + username); } - final UserAccount userAccount = _userAccountDao.getUserAccount(username, domainId); + if (!isOAuthPluginEnabled()) { + logger.debug("OAuth2 plugin is disabled"); + return new Pair(false, null); + } else if (requestParameters == null) { + logger.debug("Request parameters are null"); + return new Pair(false, null); + } + + final UserAccount userAccount = userAccountDao.getUserAccount(username, domainId); if (userAccount == null) { logger.debug("Unable to find user with " + username + " in domain " + domainId + ", or user source is not OAUTH2"); return new Pair(false, null); } else { - User user = _userDao.getUser(userAccount.getId()); + User user = userDao.getUser(userAccount.getId()); final String[] provider = (String[])requestParameters.get(ApiConstants.PROVIDER); final String[] emailArray = (String[])requestParameters.get(ApiConstants.EMAIL); final String[] secretCodeArray = (String[])requestParameters.get(ApiConstants.SECRET_CODE); + + if (provider == null) { + return new Pair(false, null); + } + String oauthProvider = ((provider == null) ? null : provider[0]); String email = ((emailArray == null) ? null : emailArray[0]); String secretCode = ((secretCodeArray == null) ? 
null : secretCodeArray[0]); - UserOAuth2Authenticator authenticator = _userOAuth2mgr.getUserOAuth2AuthenticationProvider(oauthProvider); + UserOAuth2Authenticator authenticator = userOAuth2mgr.getUserOAuth2AuthenticationProvider(oauthProvider); if (user != null && authenticator.verifyUser(email, secretCode)) { return new Pair(true, null); } @@ -73,4 +88,8 @@ public class OAuth2UserAuthenticator extends AdapterBase implements UserAuthenti public String encode(String password) { return null; } + + protected boolean isOAuthPluginEnabled() { + return OAuth2IsPluginEnabled.value(); + } } diff --git a/plugins/user-authenticators/oauth2/src/test/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticatorTest.java b/plugins/user-authenticators/oauth2/src/test/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticatorTest.java index c0d273a5fa5..d1c1889ba99 100644 --- a/plugins/user-authenticators/oauth2/src/test/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticatorTest.java +++ b/plugins/user-authenticators/oauth2/src/test/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticatorTest.java @@ -27,21 +27,29 @@ import org.apache.cloudstack.auth.UserOAuth2Authenticator; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; import java.util.HashMap; import java.util.Map; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doReturn; 
+@RunWith(MockitoJUnitRunner.class) public class OAuth2UserAuthenticatorTest { @Mock @@ -54,13 +62,14 @@ public class OAuth2UserAuthenticatorTest { private OAuth2AuthManager userOAuth2mgr; @InjectMocks + @Spy private OAuth2UserAuthenticator authenticator; - private AutoCloseable closeable; @Before public void setUp() { closeable = MockitoAnnotations.openMocks(this); + doReturn(true).when(authenticator).isOAuthPluginEnabled(); } @After @@ -68,6 +77,7 @@ public class OAuth2UserAuthenticatorTest { closeable.close(); } + @Test public void testAuthenticateWithValidCredentials() { String username = "testuser"; @@ -92,13 +102,13 @@ public class OAuth2UserAuthenticatorTest { Pair result = authenticator.authenticate(username, null, domainId, requestParameters); + assertTrue(result.first()); + assertNull(result.second()); + verify(userAccountDao).getUserAccount(username, domainId); verify(userDao).getUser(userAccount.getId()); verify(userOAuth2mgr).getUserOAuth2AuthenticationProvider(provider[0]); verify(userOAuth2Authenticator).verifyUser(email[0], secretCode[0]); - - assertEquals(true, result.first().booleanValue()); - assertEquals(null, result.second()); } @Test @@ -114,7 +124,7 @@ public class OAuth2UserAuthenticatorTest { UserOAuth2Authenticator userOAuth2Authenticator = mock(UserOAuth2Authenticator.class); when(userAccountDao.getUserAccount(username, domainId)).thenReturn(userAccount); - when(userDao.getUser(userAccount.getId())).thenReturn( user); + when(userDao.getUser(userAccount.getId())).thenReturn(user); when(userOAuth2mgr.getUserOAuth2AuthenticationProvider(provider[0])).thenReturn(userOAuth2Authenticator); when(userOAuth2Authenticator.verifyUser(email[0], secretCode[0])).thenReturn(false); @@ -125,13 +135,13 @@ public class OAuth2UserAuthenticatorTest { Pair result = authenticator.authenticate(username, null, domainId, requestParameters); + assertFalse(result.first()); + 
assertEquals(OAuth2UserAuthenticator.ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT, result.second()); + verify(userAccountDao).getUserAccount(username, domainId); verify(userDao).getUser(userAccount.getId()); verify(userOAuth2mgr).getUserOAuth2AuthenticationProvider(provider[0]); verify(userOAuth2Authenticator).verifyUser(email[0], secretCode[0]); - - assertEquals(false, result.first().booleanValue()); - assertEquals(OAuth2UserAuthenticator.ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT, result.second()); } @Test @@ -151,11 +161,11 @@ public class OAuth2UserAuthenticatorTest { Pair result = authenticator.authenticate(username, null, domainId, requestParameters); + assertFalse(result.first()); + assertNull(result.second()); + verify(userAccountDao).getUserAccount(username, domainId); verify(userDao, never()).getUser(anyLong()); verify(userOAuth2mgr, never()).getUserOAuth2AuthenticationProvider(anyString()); - - assertEquals(false, result.first().booleanValue()); - assertEquals(null, result.second()); } } diff --git a/plugins/user-authenticators/plain-text/src/main/resources/META-INF/cloudstack/plaintext/spring-plaintext-context.xml b/plugins/user-authenticators/plain-text/src/main/resources/META-INF/cloudstack/plaintext/spring-plaintext-context.xml index fccff88c4cb..0b22b010e19 100644 --- a/plugins/user-authenticators/plain-text/src/main/resources/META-INF/cloudstack/plaintext/spring-plaintext-context.xml +++ b/plugins/user-authenticators/plain-text/src/main/resources/META-INF/cloudstack/plaintext/spring-plaintext-context.xml @@ -31,5 +31,5 @@ - + diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmd.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmd.java index fb4f4cc00a5..742680fdcc5 100644 --- 
a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmd.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmd.java @@ -142,6 +142,14 @@ public class SAML2LoginAPIAuthenticatorCmd extends BaseCmd implements APIAuthent return responseObject; } + protected void checkAndFailOnMissingSAMLSignature(Signature signature) { + if (signature == null && SAML2AuthManager.SAMLCheckSignature.value()) { + logger.error("Failing SAML login due to missing signature in the SAML response and signature check is enforced. " + + "Please check and ensure the IDP configuration has signing certificate or relax the saml2.check.signature setting."); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Signature is missing from the SAML Response. Please contact the Administrator"); + } + } + @Override public String authenticate(final String command, final Map params, final HttpSession session, final InetAddress remoteAddress, final String responseType, final StringBuilder auditTrailSb, final HttpServletRequest req, final HttpServletResponse resp) throws ServerApiException { try { @@ -218,11 +226,13 @@ public class SAML2LoginAPIAuthenticatorCmd extends BaseCmd implements APIAuthent "Received SAML response for a SSO request that we may not have made or has expired, please try logging in again", params, responseType)); } + samlAuthManager.purgeToken(token); // Set IdpId for this session session.setAttribute(SAMLPluginConstants.SAML_IDPID, issuer.getValue()); Signature sig = processedSAMLResponse.getSignature(); + checkAndFailOnMissingSAMLSignature(sig); if (idpMetadata.getSigningCertificate() != null && sig != null) { BasicX509Credential credential = new BasicX509Credential(); credential.setEntityCertificate(idpMetadata.getSigningCertificate()); @@ -236,9 +246,8 @@ public class SAML2LoginAPIAuthenticatorCmd extends BaseCmd implements APIAuthent params, 
responseType)); } } - if (username == null) { - username = SAMLUtils.getValueFromAssertions(processedSAMLResponse.getAssertions(), SAML2AuthManager.SAMLUserAttributeName.value()); - } + + username = SAMLUtils.getValueFromAssertions(processedSAMLResponse.getAssertions(), SAML2AuthManager.SAMLUserAttributeName.value()); for (Assertion assertion: processedSAMLResponse.getAssertions()) { if (assertion!= null && assertion.getSubject() != null && assertion.getSubject().getNameID() != null) { @@ -270,6 +279,7 @@ public class SAML2LoginAPIAuthenticatorCmd extends BaseCmd implements APIAuthent continue; } Signature encSig = assertion.getSignature(); + checkAndFailOnMissingSAMLSignature(encSig); if (idpMetadata.getSigningCertificate() != null && encSig != null) { BasicX509Credential sigCredential = new BasicX509Credential(); sigCredential.setEntityCertificate(idpMetadata.getSigningCertificate()); diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManager.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManager.java index e52a7e32695..3a4030f9c0d 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManager.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManager.java @@ -25,59 +25,63 @@ import java.util.Collection; public interface SAML2AuthManager extends PluggableAPIAuthenticator, PluggableService { - public static final ConfigKey SAMLIsPluginEnabled = new ConfigKey("Advanced", Boolean.class, "saml2.enabled", "false", + ConfigKey SAMLIsPluginEnabled = new ConfigKey("Advanced", Boolean.class, "saml2.enabled", "false", "Indicates whether SAML SSO plugin is enabled or not", true); - public static final ConfigKey SAMLServiceProviderID = new ConfigKey("Advanced", String.class, "saml2.sp.id", "org.apache.cloudstack", + ConfigKey SAMLServiceProviderID = new ConfigKey("Advanced", String.class, 
"saml2.sp.id", "org.apache.cloudstack", "SAML2 Service Provider Identifier String", true); - public static final ConfigKey SAMLServiceProviderContactPersonName = new ConfigKey("Advanced", String.class, "saml2.sp.contact.person", "CloudStack Developers", + ConfigKey SAMLServiceProviderContactPersonName = new ConfigKey("Advanced", String.class, "saml2.sp.contact.person", "CloudStack Developers", "SAML2 Service Provider Contact Person Name", true); - public static final ConfigKey SAMLServiceProviderContactEmail = new ConfigKey("Advanced", String.class, "saml2.sp.contact.email", "dev@cloudstack.apache.org", + ConfigKey SAMLServiceProviderContactEmail = new ConfigKey("Advanced", String.class, "saml2.sp.contact.email", "dev@cloudstack.apache.org", "SAML2 Service Provider Contact Email Address", true); - public static final ConfigKey SAMLServiceProviderOrgName = new ConfigKey("Advanced", String.class, "saml2.sp.org.name", "Apache CloudStack", + ConfigKey SAMLServiceProviderOrgName = new ConfigKey("Advanced", String.class, "saml2.sp.org.name", "Apache CloudStack", "SAML2 Service Provider Organization Name", true); - public static final ConfigKey SAMLServiceProviderOrgUrl = new ConfigKey("Advanced", String.class, "saml2.sp.org.url", "http://cloudstack.apache.org", + ConfigKey SAMLServiceProviderOrgUrl = new ConfigKey("Advanced", String.class, "saml2.sp.org.url", "http://cloudstack.apache.org", "SAML2 Service Provider Organization URL", true); - public static final ConfigKey SAMLServiceProviderSingleSignOnURL = new ConfigKey("Advanced", String.class, "saml2.sp.sso.url", "http://localhost:8080/client/api?command=samlSso", + ConfigKey SAMLServiceProviderSingleSignOnURL = new ConfigKey("Advanced", String.class, "saml2.sp.sso.url", "http://localhost:8080/client/api?command=samlSso", "SAML2 CloudStack Service Provider Single Sign On URL", true); - public static final ConfigKey SAMLServiceProviderSingleLogOutURL = new ConfigKey("Advanced", String.class, "saml2.sp.slo.url", 
"http://localhost:8080/client/", + ConfigKey SAMLServiceProviderSingleLogOutURL = new ConfigKey("Advanced", String.class, "saml2.sp.slo.url", "http://localhost:8080/client/", "SAML2 CloudStack Service Provider Single Log Out URL", true); - public static final ConfigKey SAMLCloudStackRedirectionUrl = new ConfigKey("Advanced", String.class, "saml2.redirect.url", "http://localhost:8080/client", + ConfigKey SAMLCloudStackRedirectionUrl = new ConfigKey("Advanced", String.class, "saml2.redirect.url", "http://localhost:8080/client", "The CloudStack UI url the SSO should redirected to when successful", true); - public static final ConfigKey SAMLUserAttributeName = new ConfigKey("Advanced", String.class, "saml2.user.attribute", "uid", + ConfigKey SAMLUserAttributeName = new ConfigKey("Advanced", String.class, "saml2.user.attribute", "uid", "Attribute name to be looked for in SAML response that will contain the username", true); - public static final ConfigKey SAMLIdentityProviderMetadataURL = new ConfigKey("Advanced", String.class, "saml2.idp.metadata.url", "https://openidp.feide.no/simplesaml/saml2/idp/metadata.php", + ConfigKey SAMLIdentityProviderMetadataURL = new ConfigKey("Advanced", String.class, "saml2.idp.metadata.url", "https://openidp.feide.no/simplesaml/saml2/idp/metadata.php", "SAML2 Identity Provider Metadata XML Url", true); - public static final ConfigKey SAMLDefaultIdentityProviderId = new ConfigKey("Advanced", String.class, "saml2.default.idpid", "https://openidp.feide.no", + ConfigKey SAMLDefaultIdentityProviderId = new ConfigKey("Advanced", String.class, "saml2.default.idpid", "https://openidp.feide.no", "The default IdP entity ID to use only in case of multiple IdPs", true); - public static final ConfigKey SAMLSignatureAlgorithm = new ConfigKey<>(String.class, "saml2.sigalg", "Advanced", "SHA1", + ConfigKey SAMLSignatureAlgorithm = new ConfigKey<>(String.class, "saml2.sigalg", "Advanced", "SHA1", "The algorithm to use to when signing a SAML request. 
Default is SHA1, allowed algorithms: SHA1, SHA256, SHA384, SHA512", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.Select, "SHA1,SHA256,SHA384,SHA512"); - public static final ConfigKey SAMLAppendDomainSuffix = new ConfigKey("Advanced", Boolean.class, "saml2.append.idpdomain", "false", + ConfigKey SAMLAppendDomainSuffix = new ConfigKey("Advanced", Boolean.class, "saml2.append.idpdomain", "false", "If enabled, create account/user dialog with SAML SSO enabled will append the IdP domain to the user or account name in the UI dialog", true); - public static final ConfigKey SAMLTimeout = new ConfigKey("Advanced", Integer.class, "saml2.timeout", "1800", + ConfigKey SAMLTimeout = new ConfigKey("Advanced", Integer.class, "saml2.timeout", "1800", "SAML2 IDP Metadata refresh interval in seconds, minimum value is set to 300", true); - public SAMLProviderMetadata getSPMetadata(); - public SAMLProviderMetadata getIdPMetadata(String entityId); - public Collection getAllIdPMetadata(); + ConfigKey SAMLCheckSignature = new ConfigKey("Advanced", Boolean.class, "saml2.check.signature", "true", + "When enabled (default and recommended), SAML2 signature checks are enforced and lack of signature in the SAML SSO response will cause login exception. 
Disabling this is not advisable but provided for backward compatibility for users who are able to accept the risks.", false); - public boolean isUserAuthorized(Long userId, String entityId); - public boolean authorizeUser(Long userId, String entityId, boolean enable); + SAMLProviderMetadata getSPMetadata(); + SAMLProviderMetadata getIdPMetadata(String entityId); + Collection getAllIdPMetadata(); - public void saveToken(String authnId, String domain, String entity); - public SAMLTokenVO getToken(String authnId); - public void expireTokens(); + boolean isUserAuthorized(Long userId, String entityId); + boolean authorizeUser(Long userId, String entityId, boolean enable); + + void saveToken(String authnId, String domain, String entity); + SAMLTokenVO getToken(String authnId); + void purgeToken(SAMLTokenVO token); + void expireTokens(); } diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java index 0e8790d6558..230c53ac4a9 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java @@ -485,6 +485,13 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage return _samlTokenDao.findByUuid(authnId); } + @Override + public void purgeToken(SAMLTokenVO token) { + if (token != null) { + _samlTokenDao.remove(token.getId()); + } + } + @Override public void expireTokens() { _samlTokenDao.expireTokens(); @@ -533,6 +540,6 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage SAMLServiceProviderSingleSignOnURL, SAMLServiceProviderSingleLogOutURL, SAMLCloudStackRedirectionUrl, SAMLUserAttributeName, SAMLIdentityProviderMetadataURL, SAMLDefaultIdentityProviderId, - SAMLSignatureAlgorithm, SAMLAppendDomainSuffix, 
SAMLTimeout}; + SAMLSignatureAlgorithm, SAMLAppendDomainSuffix, SAMLTimeout, SAMLCheckSignature}; } } diff --git a/plugins/user-authenticators/saml2/src/test/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmdTest.java b/plugins/user-authenticators/saml2/src/test/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmdTest.java index 39c8c231bf0..48a3139052d 100644 --- a/plugins/user-authenticators/saml2/src/test/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmdTest.java +++ b/plugins/user-authenticators/saml2/src/test/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmdTest.java @@ -271,6 +271,30 @@ public class SAML2LoginAPIAuthenticatorCmdTest { verifyTestWhenFailToAuthenticateThrowExceptionOrRedirectToUrl(false, hasThrownServerApiException, 0, 0); } + private void overrideDefaultConfigValue(final ConfigKey configKey, final String name, final Object o) throws IllegalAccessException, NoSuchFieldException { + Field f = ConfigKey.class.getDeclaredField(name); + f.setAccessible(true); + f.set(configKey, o); + } + + @Test + public void testFailOnSAMLSignatureCheckWhenFalse() throws NoSuchFieldException, IllegalAccessException { + overrideDefaultConfigValue(SAML2AuthManager.SAMLCheckSignature, "_value", false); + SAML2LoginAPIAuthenticatorCmd cmd = new SAML2LoginAPIAuthenticatorCmd(); + try { + cmd.checkAndFailOnMissingSAMLSignature(null); + } catch(Exception e) { + Assert.fail("This shouldn't throw any exception"); + } + } + + @Test(expected = ServerApiException.class) + public void testFailOnSAMLSignatureCheckWhenTrue() throws NoSuchFieldException, IllegalAccessException { + overrideDefaultConfigValue(SAML2AuthManager.SAMLCheckSignature, "_value", true); + SAML2LoginAPIAuthenticatorCmd cmd = new SAML2LoginAPIAuthenticatorCmd(); + cmd.checkAndFailOnMissingSAMLSignature(null); + } + private UserAccountVO configureTestWhenFailToAuthenticateThrowExceptionOrRedirectToUrl(String entity, String 
configurationValue, Boolean isUserAuthorized) throws IOException { Mockito.when(samlAuthManager.isUserAuthorized(nullable(Long.class), nullable(String.class))).thenReturn(isUserAuthorized); diff --git a/plugins/user-authenticators/sha256salted/src/main/resources/META-INF/cloudstack/sha256salted/spring-sha256salted-context.xml b/plugins/user-authenticators/sha256salted/src/main/resources/META-INF/cloudstack/sha256salted/spring-sha256salted-context.xml index 3e29fd9ddba..593c8282cb5 100644 --- a/plugins/user-authenticators/sha256salted/src/main/resources/META-INF/cloudstack/sha256salted/spring-sha256salted-context.xml +++ b/plugins/user-authenticators/sha256salted/src/main/resources/META-INF/cloudstack/sha256salted/spring-sha256salted-context.xml @@ -30,5 +30,5 @@ - + diff --git a/plugins/user-two-factor-authenticators/static-pin/src/main/resources/META-INF/cloudstack/staticpin/spring-staticpin-context.xml b/plugins/user-two-factor-authenticators/static-pin/src/main/resources/META-INF/cloudstack/staticpin/spring-staticpin-context.xml index ac27ba527db..6b30a98ef07 100644 --- a/plugins/user-two-factor-authenticators/static-pin/src/main/resources/META-INF/cloudstack/staticpin/spring-staticpin-context.xml +++ b/plugins/user-two-factor-authenticators/static-pin/src/main/resources/META-INF/cloudstack/staticpin/spring-staticpin-context.xml @@ -31,5 +31,5 @@ - + diff --git a/plugins/user-two-factor-authenticators/totp/src/main/resources/META-INF/cloudstack/totp/spring-google-context.xml b/plugins/user-two-factor-authenticators/totp/src/main/resources/META-INF/cloudstack/totp/spring-google-context.xml index 84a0b0cfcb7..73ab7b636d4 100644 --- a/plugins/user-two-factor-authenticators/totp/src/main/resources/META-INF/cloudstack/totp/spring-google-context.xml +++ b/plugins/user-two-factor-authenticators/totp/src/main/resources/META-INF/cloudstack/totp/spring-google-context.xml @@ -31,5 +31,5 @@ - + diff --git a/pom.xml b/pom.xml index 5cd22870916..29fc939f553 100644 --- 
a/pom.xml +++ b/pom.xml @@ -58,7 +58,7 @@ 11 target build/replace.properties - -Djava.security.egd=file:/dev/./urandom -noverify + -Djava.security.egd=file:/dev/./urandom -noverify --add-opens=java.base/java.lang=ALL-UNNAMED --add-exports=java.base/sun.security.x509=ALL-UNNAMED --add-opens=java.base/javax.net.ssl=ALL-UNNAMED 1.8 @@ -79,6 +79,7 @@ 3.8.2 2.22.2 4.4.1 + 3.2.0 2.19.0 @@ -89,7 +90,7 @@ 1.15 1.5.0 4.4 - 1.21 + 1.26.0 1.3 1.4 3.1 @@ -101,6 +102,7 @@ 1.10 1.3.3 2.9.0 + 5.1.0 0.5 2.6 2.9.0 @@ -137,6 +139,7 @@ 3.2.14 2.6.11 0.0.27 + 3.0.2 1.42.3 2.4.17 1.7.2 @@ -166,12 +169,13 @@ 2.7.0 0.5.3 1.5.0-b01 + 0.9.14 8.0.33 2.0.4 10.1 2.6.6 0.6.0 - 0.3.0 + 0.5.2 0.10.2 3.4.4_1 4.0.1 @@ -185,7 +189,7 @@ 1.4.20 5.3.26 0.5.4 - 1.12.0 + 3.1.7 @@ -362,6 +366,7 @@ commons-daemon ${cs.daemon.version} + org.apache.commons commons-dbcp2 @@ -373,6 +378,11 @@ + + com.zaxxer + HikariCP + ${cs.hikaricp.version} + commons-discovery commons-discovery @@ -453,6 +463,12 @@ reload4j ${cs.reload4j.version} + + mysql + mysql-connector-java + ${cs.mysql.version} + test + log4j apache-log4j-extras @@ -768,6 +784,11 @@ javax.inject 1 + + com.github.ben-manes.caffeine + caffeine + ${cs.caffeine.version} + @@ -933,20 +954,6 @@ - - - org.codehaus.gmaven - gmaven-plugin - [1.3,) - - compile - testCompile - - - - - - org.apache.maven.plugins @@ -1013,6 +1020,7 @@ .idea/ .metadata/** .git/** + .github/linters/codespell.txt .gitignore CHANGES.md CONTRIBUTING.md diff --git a/python/lib/cloud_utils.py b/python/lib/cloud_utils.py index d424bf1f023..16d9f17e8b4 100644 --- a/python/lib/cloud_utils.py +++ b/python/lib/cloud_utils.py @@ -375,7 +375,7 @@ def list_zonespods(host): x = [ (zonename,podname) for pod in dom.childNodes[0].childNodes for podname in [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "name" ] - for zonename in [ x.childNodes[0].wholeText for x in pod.childNodes if x.tagName == "zonename" ] + for zonename in [ x.childNodes[0].wholeText for x in pod.childNodes if 
x.tagName == "zonename" ] ] return x @@ -433,7 +433,7 @@ def setup_agent_config(configfile, host, zone, pod, cluster, guid, pubNic, prvNi if guid != None: confopts['guid'] = guid else: - if not "guid" in confopts: + if "guid" not in confopts: stderr("Generating GUID for this Agent") confopts['guid'] = uuidgen().stdout.strip() @@ -491,7 +491,7 @@ def setup_consoleproxy_config(configfile, host, zone, pod): confopts = dict([ m.split("=",1) for m in lines if "=" in m and not m.startswith("#") ]) confposes = dict([ (m.split("=",1)[0],n) for n,m in enumerate(lines) if "=" in m and not m.startswith("#") ]) - if not "guid" in confopts: + if "guid" not in confopts: stderr("Generating GUID for this Console Proxy") confopts['guid'] = uuidgen().stdout.strip() diff --git a/python/lib/cloudutils/configFileOps.py b/python/lib/cloudutils/configFileOps.py index 41e9c7a1e8e..c061071b96e 100644 --- a/python/lib/cloudutils/configFileOps.py +++ b/python/lib/cloudutils/configFileOps.py @@ -63,7 +63,7 @@ class configFileOps: newLines = [] if os.path.exists(self.fileName) and os.path.isfile(self.fileName): fp = open(self.fileName, "r") - for line in fp.readlines(): + for line in fp.readlines(): matched = False for entry in self.entries: if entry.op == "add": diff --git a/python/lib/cloudutils/syscfg.py b/python/lib/cloudutils/syscfg.py index 19032ce4aff..fe68b02dfe8 100755 --- a/python/lib/cloudutils/syscfg.py +++ b/python/lib/cloudutils/syscfg.py @@ -114,7 +114,7 @@ class sysConfigAgent(sysConfig): pass if size != -1 and size < (30 * 1024 * 1024): - raise CloudRuntimeException("Need at least 30G free disk space under /var/lib/libvirt/images") + raise CloudRuntimeException("Need at least 30G free disk space under /var/lib/libvirt/images") #check memory mem = -1 @@ -124,7 +124,7 @@ class sysConfigAgent(sysConfig): pass if mem != -1 and mem < 1: - raise CloudRuntimeException("Need at least 1G memory") + raise CloudRuntimeException("Need at least 1G memory") if os.geteuid() != 0: diff --git 
a/python/lib/cloudutils/utilities.py b/python/lib/cloudutils/utilities.py index 5a6114f1011..5b07ff1eff6 100755 --- a/python/lib/cloudutils/utilities.py +++ b/python/lib/cloudutils/utilities.py @@ -47,11 +47,11 @@ class bash: alarm(0) except Alarm: os.kill(self.process.pid, SIGKILL) - raise CloudRuntimeException("Timeout during command execution") + raise CloudRuntimeException("Timeout during command execution") self.success = self.process.returncode == 0 except: - raise CloudRuntimeException(formatExceptionInfo()) + raise CloudRuntimeException(formatExceptionInfo()) if not self.success: logging.debug("Failed to execute:" + self.getErrMsg()) diff --git a/quickcloud/src/main/resources/META-INF/cloudstack/core/spring-quickcloud-core-context-override.xml b/quickcloud/src/main/resources/META-INF/cloudstack/core/spring-quickcloud-core-context-override.xml index 6074ea1ceb2..a434bbe2412 100644 --- a/quickcloud/src/main/resources/META-INF/cloudstack/core/spring-quickcloud-core-context-override.xml +++ b/quickcloud/src/main/resources/META-INF/cloudstack/core/spring-quickcloud-core-context-override.xml @@ -25,7 +25,7 @@ http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd" - > + > diff --git a/scripts/network/juniper/application-add.xml b/scripts/network/juniper/application-add.xml index 177329a0359..72fca676ce2 100644 --- a/scripts/network/juniper/application-add.xml +++ b/scripts/network/juniper/application-add.xml @@ -20,11 +20,11 @@ under the License. - -%name% -%protocol% + +%name% +%protocol% %dest-port-icmp% - + diff --git a/scripts/network/juniper/application-getone.xml b/scripts/network/juniper/application-getone.xml index 4f6b5546a5c..69e5417a58b 100644 --- a/scripts/network/juniper/application-getone.xml +++ b/scripts/network/juniper/application-getone.xml @@ -20,9 +20,9 @@ under the License. 
- -%name% - + +%name% + diff --git a/scripts/network/juniper/dest-nat-rule-add.xml b/scripts/network/juniper/dest-nat-rule-add.xml index 2ef1df29acc..2f043d614f7 100644 --- a/scripts/network/juniper/dest-nat-rule-add.xml +++ b/scripts/network/juniper/dest-nat-rule-add.xml @@ -49,8 +49,4 @@ under the License. - - - - - + diff --git a/scripts/network/juniper/guest-vlan-filter-term-add.xml b/scripts/network/juniper/guest-vlan-filter-term-add.xml index d0f52a0be44..9d4150958cc 100644 --- a/scripts/network/juniper/guest-vlan-filter-term-add.xml +++ b/scripts/network/juniper/guest-vlan-filter-term-add.xml @@ -19,17 +19,17 @@ under the License. - - -%filter-name% - -%term-name% - -%term-name% - - - - + + +%filter-name% + +%term-name% + +%term-name% + + + + diff --git a/scripts/network/juniper/private-interface-add.xml b/scripts/network/juniper/private-interface-add.xml index f0ccb6e0e76..d291597a57a 100644 --- a/scripts/network/juniper/private-interface-add.xml +++ b/scripts/network/juniper/private-interface-add.xml @@ -20,21 +20,21 @@ under the License. - -%private-interface-name% - - -%vlan-id% -%vlan-id% - - -
    -%private-interface-ip% -
    -
    -
    -
    -
    + +%private-interface-name% + + +%vlan-id% +%vlan-id% + + +
    +%private-interface-ip% +
    +
    +
    +
    +
    diff --git a/scripts/network/juniper/private-interface-getone.xml b/scripts/network/juniper/private-interface-getone.xml index 474e6719366..bdbb19b412c 100644 --- a/scripts/network/juniper/private-interface-getone.xml +++ b/scripts/network/juniper/private-interface-getone.xml @@ -20,13 +20,13 @@ under the License. - -%private-interface-name% - - -%vlan-id% - - + +%private-interface-name% + + +%vlan-id% + + diff --git a/scripts/network/juniper/private-interface-with-filters-add.xml b/scripts/network/juniper/private-interface-with-filters-add.xml index 3ce8c55b242..ea6e01f6fb1 100644 --- a/scripts/network/juniper/private-interface-with-filters-add.xml +++ b/scripts/network/juniper/private-interface-with-filters-add.xml @@ -20,14 +20,14 @@ under the License. - -%private-interface-name% - - -%vlan-id% -%vlan-id% - - + +%private-interface-name% + + +%vlan-id% +%vlan-id% + + %input-filter-name% @@ -35,14 +35,14 @@ under the License. %output-filter-name% - -
    -%private-interface-ip% -
    -
    -
    -
    -
    + +
    +%private-interface-ip% +
    +
    +
    +
    +
    diff --git a/scripts/network/juniper/proxy-arp-add.xml b/scripts/network/juniper/proxy-arp-add.xml index ae6dee0f239..118311fb620 100644 --- a/scripts/network/juniper/proxy-arp-add.xml +++ b/scripts/network/juniper/proxy-arp-add.xml @@ -19,17 +19,17 @@ under the License. - - - - -%public-interface-name% -
    -%public-ip-address% -
    -
    -
    -
    + + + + +%public-interface-name% +
    +%public-ip-address% +
    +
    +
    +
    diff --git a/scripts/network/juniper/proxy-arp-getall.xml b/scripts/network/juniper/proxy-arp-getall.xml index 3f23a22cd5a..3ec425c28fc 100644 --- a/scripts/network/juniper/proxy-arp-getall.xml +++ b/scripts/network/juniper/proxy-arp-getall.xml @@ -19,12 +19,12 @@ under the License. - - - -%interface-name% - - + + + +%interface-name% + + diff --git a/scripts/network/juniper/proxy-arp-getone.xml b/scripts/network/juniper/proxy-arp-getone.xml index e43dc0bc5ea..3299dac9d0e 100644 --- a/scripts/network/juniper/proxy-arp-getone.xml +++ b/scripts/network/juniper/proxy-arp-getone.xml @@ -19,17 +19,17 @@ under the License. - - - - -%public-interface-name% -
    -%public-ip-address% -
    -
    -
    -
    + + + + +%public-interface-name% +
    +%public-ip-address% +
    +
    +
    +
    diff --git a/scripts/network/juniper/public-ip-filter-term-add.xml b/scripts/network/juniper/public-ip-filter-term-add.xml index 9aad4c23990..e8ffd7aad41 100644 --- a/scripts/network/juniper/public-ip-filter-term-add.xml +++ b/scripts/network/juniper/public-ip-filter-term-add.xml @@ -19,22 +19,22 @@ under the License. - - -%filter-name% - -%term-name% - -<%address-type%> -%ip-address% - - - -%term-name% - - - - + + +%filter-name% + +%term-name% + +<%address-type%> +%ip-address% + + + +%term-name% + + + + diff --git a/scripts/network/juniper/security-policy-group.xml b/scripts/network/juniper/security-policy-group.xml index 1d0dc8cba12..46b57b0bfcb 100644 --- a/scripts/network/juniper/security-policy-group.xml +++ b/scripts/network/juniper/security-policy-group.xml @@ -19,13 +19,13 @@ under the License. - - - -%from-zone% -%to-zone% - - + + + +%from-zone% +%to-zone% + + diff --git a/scripts/network/juniper/zone-interface-add.xml b/scripts/network/juniper/zone-interface-add.xml index 9b2d37278ac..5f6d4c0416a 100644 --- a/scripts/network/juniper/zone-interface-add.xml +++ b/scripts/network/juniper/zone-interface-add.xml @@ -19,15 +19,15 @@ under the License. - - - -%private-zone-name% - -%zone-interface-name% - - - + + + +%private-zone-name% + +%zone-interface-name% + + + diff --git a/scripts/network/juniper/zone-interface-getone.xml b/scripts/network/juniper/zone-interface-getone.xml index 4bc5c4bc0d2..2078c015ba4 100644 --- a/scripts/network/juniper/zone-interface-getone.xml +++ b/scripts/network/juniper/zone-interface-getone.xml @@ -19,15 +19,15 @@ under the License. 
- - - -%private-zone-name% - -%zone-interface-name% - - - + + + +%private-zone-name% + +%zone-interface-name% + + + diff --git a/scripts/network/ping/baremetal_user_data.py b/scripts/network/ping/baremetal_user_data.py index 5a189353467..02fe845392c 100755 --- a/scripts/network/ping/baremetal_user_data.py +++ b/scripts/network/ping/baremetal_user_data.py @@ -38,7 +38,7 @@ def writeIfNotHere(fileName, texts): texts = [ "%s\n" % t for t in texts ] need = False for t in texts: - if not t in entries: + if t not in entries: entries.append(t) need = True diff --git a/scripts/storage/multipath/copyVolume.sh b/scripts/storage/multipath/copyVolume.sh index d169198251b..8e6609ea108 100755 --- a/scripts/storage/multipath/copyVolume.sh +++ b/scripts/storage/multipath/copyVolume.sh @@ -22,7 +22,7 @@ OUTPUT_FILE=${3:?"Output file/path is required"} echo "$(date): qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE}" -qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && { +qemu-img convert -n -p -W -t writeback -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && { # if its a block device make sure we flush caches before exiting lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && { blockdev --flushbufs ${OUTPUT_FILE} diff --git a/scripts/vm/hypervisor/kvm/nasbackup.sh b/scripts/vm/hypervisor/kvm/nasbackup.sh new file mode 100755 index 00000000000..5b264321bd8 --- /dev/null +++ b/scripts/vm/hypervisor/kvm/nasbackup.sh @@ -0,0 +1,169 @@ +#!/usr/bin/bash +## Licensed to the Apache Software Foundation (ASF) under one +## or more contributor license agreements. See the NOTICE file +## distributed with this work for additional information +## regarding copyright ownership. The ASF licenses this file +## to you under the Apache License, Version 2.0 (the +## "License"); you may not use this file except in compliance +## with the License. 
You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, +## software distributed under the License is distributed on an +## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +## KIND, either express or implied. See the License for the +## specific language governing permissions and limitations +## under the License. + +set -e + +# CloudStack B&R NAS Backup and Recovery Tool for KVM + +# TODO: do libvirt/logging etc checks + +### Declare variables ### + +OP="" +VM="" +NAS_TYPE="" +NAS_ADDRESS="" +MOUNT_OPTS="" +BACKUP_DIR="" +DISK_PATHS="" + +### Operation methods ### + +backup_running_vm() { + mount_operation + mkdir -p $dest + + name="root" + echo "" > $dest/backup.xml + for disk in $(virsh -c qemu:///system domblklist $VM --details 2>/dev/null | awk '/disk/{print$3}'); do + volpath=$(virsh -c qemu:///system domblklist $VM --details | awk "/$disk/{print $4}" | sed 's/.*\///') + echo "" >> $dest/backup.xml + name="datadisk" + done + echo "" >> $dest/backup.xml + + # Start push backup + virsh -c qemu:///system backup-begin --domain $VM --backupxml $dest/backup.xml > /dev/null 2>/dev/null + + # Backup domain information + virsh -c qemu:///system dumpxml $VM > $dest/domain-config.xml 2>/dev/null + virsh -c qemu:///system dominfo $VM > $dest/dominfo.xml 2>/dev/null + virsh -c qemu:///system domiflist $VM > $dest/domiflist.xml 2>/dev/null + virsh -c qemu:///system domblklist $VM > $dest/domblklist.xml 2>/dev/null + + until virsh -c qemu:///system domjobinfo $VM --completed --keep-completed 2>/dev/null | grep "Completed" > /dev/null; do + sleep 5 + done + rm -f $dest/backup.xml + sync + + # Print statistics + virsh -c qemu:///system domjobinfo $VM --completed + du -sb $dest | cut -f1 + + umount $mount_point + rmdir $mount_point +} + +backup_stopped_vm() { + mount_operation + mkdir -p $dest + + IFS="," + + name="root" + for disk in $DISK_PATHS; do + 
volUuid="${disk##*/}" + qemu-img convert -O qcow2 $disk $dest/$name.$volUuid.qcow2 + name="datadisk" + done + sync + + ls -l --numeric-uid-gid $dest | awk '{print $5}' +} + +delete_backup() { + mount_operation + + rm -frv $dest + sync + umount $mount_point + rmdir $mount_point +} + +mount_operation() { + mount_point=$(mktemp -d -t csbackup.XXXXX) + dest="$mount_point/${BACKUP_DIR}" + mount -t ${NAS_TYPE} ${NAS_ADDRESS} ${mount_point} $([[ ! -z "${MOUNT_OPTS}" ]] && echo -o ${MOUNT_OPTS}) +} + +function usage { + echo "" + echo "Usage: $0 -o -v|--vm -t -s -m -p -d " + echo "" + exit 1 +} + +while [[ $# -gt 0 ]]; do + case $1 in + -o|--operation) + OP="$2" + shift + shift + ;; + -v|--vm) + VM="$2" + shift + shift + ;; + -t|--type) + NAS_TYPE="$2" + shift + shift + ;; + -s|--storage) + NAS_ADDRESS="$2" + shift + shift + ;; + -m|--mount) + MOUNT_OPTS="$2" + shift + shift + ;; + -p|--path) + BACKUP_DIR="$2" + shift + shift + ;; + -d|--diskpaths) + DISK_PATHS="$2" + shift + shift + ;; + -h|--help) + usage + shift + ;; + *) + echo "Invalid option: $1" + usage + ;; + esac +done + +if [ "$OP" = "backup" ]; then + STATE=$(virsh -c qemu:///system list | grep $VM | awk '{print $3}') + if [ "$STATE" = "running" ]; then + backup_running_vm + else + backup_stopped_vm + fi +elif [ "$OP" = "delete" ]; then + delete_backup +fi diff --git a/scripts/vm/hypervisor/xenserver/perfmon.py b/scripts/vm/hypervisor/xenserver/perfmon.py index c310cdcf356..59c1ac3b367 100755 --- a/scripts/vm/hypervisor/xenserver/perfmon.py +++ b/scripts/vm/hypervisor/xenserver/perfmon.py @@ -179,7 +179,7 @@ class RRDUpdates: (cf, vm_or_host, uuid, param) = col_meta_data.split(':') if vm_or_host == 'vm': # Create a report for this VM if it doesn't exist - if not uuid in self.vm_reports: + if uuid not in self.vm_reports: self.vm_reports[uuid] = VMReport(uuid) # Update the VMReport with the col data and meta data vm_report = self.vm_reports[uuid] diff --git a/scripts/vm/network/security_group.py 
b/scripts/vm/network/security_group.py index 8d72557fb4f..d71e27eb264 100755 --- a/scripts/vm/network/security_group.py +++ b/scripts/vm/network/security_group.py @@ -938,7 +938,7 @@ def cleanup_rules(): vmpresent = False for vm in vmsInHost: - if vm_name in vm: + if vm_name in vm: vmpresent = True break @@ -958,7 +958,7 @@ def cleanup_rules(): vm_name = chain vmpresent = False for vm in vmsInHost: - if vm_name in vm: + if vm_name in vm: vmpresent = True break @@ -1102,7 +1102,7 @@ def add_network_rules(vm_name, vm_id, vm_ip, vm_ip6, signature, seqno, vmMac, ru changes = check_rule_log_for_vm(vmName, vm_id, vm_ip, domId, signature, seqno) - if not 1 in changes: + if 1 not in changes: logging.debug("Rules already programmed for vm " + vm_name) return True diff --git a/scripts/vm/network/vnet/ovstunnel.py b/scripts/vm/network/vnet/ovstunnel.py index 304e4920895..a39b6b18ecc 100755 --- a/scripts/vm/network/vnet/ovstunnel.py +++ b/scripts/vm/network/vnet/ovstunnel.py @@ -202,7 +202,7 @@ def create_tunnel(bridge, remote_ip, key, src_host, dst_host): key_validation = lib.do_cmd(verify_interface_key) ip_validation = lib.do_cmd(verify_interface_ip) - if not key in str(key_validation) or not remote_ip in str(ip_validation): + if key not in str(key_validation) or remote_ip not in str(ip_validation): logging.debug("WARNING: Unexpected output while verifying " + "interface %s on bridge %s" % (name, bridge)) # return "FAILURE:VERIFY_INTERFACE_FAILED" diff --git a/packaging/centos8/cloudstack-agent.te b/server/conf/cloudstack-management.logrotate.in similarity index 71% rename from packaging/centos8/cloudstack-agent.te rename to server/conf/cloudstack-management.logrotate.in index 4259e173a46..1a714a339a8 100644 --- a/packaging/centos8/cloudstack-agent.te +++ b/server/conf/cloudstack-management.logrotate.in @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -15,19 +15,14 @@ # specific language governing permissions and limitations # under the License. -module cloudstack-agent 1.0; - -require { - type nfs_t; - type system_conf_t; - type mount_t; - type qemu_t; - class file unlink; - class filesystem getattr; +/var/log/cloudstack/management/management-server.out /var/log/cloudstack/management/management-server.err { + su root root + copytruncate + daily + rotate 5 + compress + missingok + size 10M + dateext + dateformat -%Y-%m-%d } - -#============= mount_t ============== -allow mount_t system_conf_t:file unlink; - -#============= qemu_t ============== -allow qemu_t nfs_t:filesystem getattr; diff --git a/server/conf/log4j-cloud.xml.in b/server/conf/log4j-cloud.xml.in index d466f7068b7..9a8e5dc7bf3 100755 --- a/server/conf/log4j-cloud.xml.in +++ b/server/conf/log4j-cloud.xml.in @@ -31,7 +31,7 @@ under the License. - + @@ -40,7 +40,7 @@ under the License. - + @@ -49,7 +49,7 @@ under the License. - + @@ -67,7 +67,7 @@ under the License. 
- + diff --git a/server/pom.xml b/server/pom.xml index e18dcb5fe28..8f7f5e85f86 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -26,12 +26,6 @@ cloudstack 4.20.0.0-SNAPSHOT - - - juniper-tungsten-api - https://github.com/radu-todirica/tungsten-api/raw/master - - @@ -101,6 +95,11 @@ commons-math3 ${cs.commons-math3.version} + + com.github.spullara.mustache.java + compiler + ${cs.mustache.version} + org.apache.cloudstack cloud-utils @@ -164,6 +163,11 @@ cloud-framework-agent-lb ${project.version} + + org.apache.cloudstack + cloud-framework-db + ${project.version} + org.apache.cloudstack cloud-engine-storage-configdrive @@ -187,11 +191,6 @@ metrics-jvm 3.0.2 - - net.juniper.tungsten - juniper-tungsten-api - 2.0 - @@ -218,7 +217,7 @@ org.apache.maven.plugins maven-surefire-plugin - @{argLine} -Xmx2048m -XX:MaxPermSize=512m -Djava.security.egd=file:/dev/./urandom + @{argLine} -Xmx2048m -XX:MaxMetaspaceSize=512m -Djava.security.egd=file:/dev/./urandom %regex[.*[0-9]*To[0-9]*.*Test.*] com/cloud/upgrade/AdvanceZone223To224UpgradeTest diff --git a/server/src/main/java/com/cloud/acl/DomainChecker.java b/server/src/main/java/com/cloud/acl/DomainChecker.java index 729c7a9e43a..e9f60ea7aa1 100644 --- a/server/src/main/java/com/cloud/acl/DomainChecker.java +++ b/server/src/main/java/com/cloud/acl/DomainChecker.java @@ -206,7 +206,7 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { return true; } else if (entity instanceof Network && accessType != null && accessType == AccessType.UseEntry) { - _networkMgr.checkNetworkPermissions(caller, (Network)entity); + _networkMgr.checkNetworkPermissions(caller, (Network) entity); } else if (entity instanceof Network && accessType != null && accessType == AccessType.OperateEntry) { _networkMgr.checkNetworkOperatePermissions(caller, (Network)entity); } else if (entity instanceof VirtualRouter) { @@ -214,30 +214,58 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { } else if (entity 
instanceof AffinityGroup) { return false; } else { - if (_accountService.isNormalUser(caller.getId())) { - Account account = _accountDao.findById(entity.getAccountId()); - String errorMessage = String.format("%s does not have permission to operate with resource", caller); - if (account != null && account.getType() == Account.Type.PROJECT) { - //only project owner can delete/modify the project - if (accessType != null && accessType == AccessType.ModifyProject) { - if (!_projectMgr.canModifyProjectAccount(caller, account.getId())) { - throw new PermissionDeniedException(errorMessage); - } - } else if (!_projectMgr.canAccessProjectAccount(caller, account.getId())) { - throw new PermissionDeniedException(errorMessage); - } - checkOperationPermitted(caller, entity); - } else { - if (caller.getId() != entity.getAccountId()) { - throw new PermissionDeniedException(errorMessage); - } - } - } + validateCallerHasAccessToEntityOwner(caller, entity, accessType); } return true; } - private boolean checkOperationPermitted(Account caller, ControlledEntity entity) { + protected void validateCallerHasAccessToEntityOwner(Account caller, ControlledEntity entity, AccessType accessType) { + PermissionDeniedException exception = new PermissionDeniedException("Caller does not have permission to operate with provided resource."); + String entityLog = String.format("entity [owner ID: %d, type: %s]", entity.getAccountId(), + entity.getEntityType().getSimpleName()); + + if (_accountService.isRootAdmin(caller.getId())) { + return; + } + + if (caller.getId() == entity.getAccountId()) { + return; + } + + Account owner = _accountDao.findById(entity.getAccountId()); + if (owner == null) { + logger.error(String.format("Owner not found for %s", entityLog)); + throw exception; + } + + Account.Type callerAccountType = caller.getType(); + if ((callerAccountType == Account.Type.DOMAIN_ADMIN || callerAccountType == Account.Type.RESOURCE_DOMAIN_ADMIN) && + _domainDao.isChildDomain(caller.getDomainId(), 
owner.getDomainId())) { + return; + } + + if (owner.getType() == Account.Type.PROJECT) { + // only project owner can delete/modify the project + if (accessType == AccessType.ModifyProject) { + if (!_projectMgr.canModifyProjectAccount(caller, owner.getId())) { + logger.error(String.format("Caller ID: %d does not have permission to modify project with " + + "owner ID: %d", caller.getId(), owner.getId())); + throw exception; + } + } else if (!_projectMgr.canAccessProjectAccount(caller, owner.getId())) { + logger.error(String.format("Caller ID: %d does not have permission to access project with " + + "owner ID: %d", caller.getId(), owner.getId())); + throw exception; + } + checkOperationPermitted(caller, entity); + return; + } + + logger.error(String.format("Caller ID: %d does not have permission to access %s", caller.getId(), entityLog)); + throw exception; + } + + protected boolean checkOperationPermitted(Account caller, ControlledEntity entity) { User user = CallContext.current().getCallingUser(); Project project = projectDao.findByProjectAccountId(entity.getAccountId()); if (project == null) { diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index 36330d6685c..99ac2492e83 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -128,12 +128,10 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (type == Host.Type.Storage) { // FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not - return new ArrayList(); + return new ArrayList<>(); } - if (logger.isDebugEnabled()) { - logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); - } + logger.debug("Looking for hosts in zone [{}], 
pod [{}], cluster [{}]", dcId, podId, clusterId); String hostTagOnOffering = offering.getHostTag(); String hostTagOnTemplate = template.getTemplateTag(); @@ -142,8 +140,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { boolean hasSvcOfferingTag = hostTagOnOffering != null ? true : false; boolean hasTemplateTag = hostTagOnTemplate != null ? true : false; - List clusterHosts = new ArrayList(); - List hostsMatchingUefiTag = new ArrayList(); + List clusterHosts = new ArrayList<>(); + List hostsMatchingUefiTag = new ArrayList<>(); if(isVMDeployedWithUefi){ hostsMatchingUefiTag = _hostDao.listByHostCapability(type, clusterId, podId, dcId, Host.HOST_UEFI_ENABLE); if (logger.isDebugEnabled()) { @@ -159,8 +157,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (hostTagOnOffering == null && hostTagOnTemplate == null) { clusterHosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId); } else { - List hostsMatchingOfferingTag = new ArrayList(); - List hostsMatchingTemplateTag = new ArrayList(); + List hostsMatchingOfferingTag = new ArrayList<>(); + List hostsMatchingTemplateTag = new ArrayList<>(); if (hasSvcOfferingTag) { if (logger.isDebugEnabled()) { logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); @@ -205,7 +203,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (clusterHosts.isEmpty()) { - logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTagOnOffering)); + logger.error("No suitable host found for vm [{}] with tags [{}].", vmProfile, hostTagOnOffering); throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile)); } // add all hosts that we are not considering to the avoid list @@ -231,8 +229,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { ServiceOffering offering = 
vmProfile.getServiceOffering(); VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate(); Account account = vmProfile.getOwner(); - List suitableHosts = new ArrayList(); - List hostsCopy = new ArrayList(hosts); + List suitableHosts = new ArrayList<>(); + List hostsCopy = new ArrayList<>(hosts); if (type == Host.Type.Storage) { // FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of @@ -314,7 +312,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } long serviceOfferingId = offering.getId(); - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); ServiceOfferingDetailsVO offeringDetails = null; for (Host host : hosts) { @@ -383,15 +381,15 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } //now filter the given list of Hosts by this ordered list - Map hostMap = new HashMap(); + Map hostMap = new HashMap<>(); for (Host host : hosts) { hostMap.put(host.getId(), host); } - List matchingHostIds = new ArrayList(hostMap.keySet()); + List matchingHostIds = new ArrayList<>(hostMap.keySet()); hostIdsByFreeCapacity.retainAll(matchingHostIds); - List reorderedHosts = new ArrayList(); + List reorderedHosts = new ArrayList<>(); for(Long id: hostIdsByFreeCapacity){ reorderedHosts.add(hostMap.get(id)); } @@ -413,15 +411,15 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } //now filter the given list of Hosts by this ordered list - Map hostMap = new HashMap(); + Map hostMap = new HashMap<>(); for (Host host : hosts) { hostMap.put(host.getId(), host); } - List matchingHostIds = new ArrayList(hostMap.keySet()); + List matchingHostIds = new ArrayList<>(hostMap.keySet()); hostIdsByVmCount.retainAll(matchingHostIds); - List reorderedHosts = new ArrayList(); + List reorderedHosts = new ArrayList<>(); for (Long id : hostIdsByVmCount) { reorderedHosts.add(hostMap.get(id)); } @@ -444,11 +442,11 @@ public class 
FirstFitAllocator extends AdapterBase implements HostAllocator { // Determine the guest OS category of the template String templateGuestOSCategory = getTemplateGuestOSCategory(template); - List prioritizedHosts = new ArrayList(); - List noHvmHosts = new ArrayList(); + List prioritizedHosts = new ArrayList<>(); + List noHvmHosts = new ArrayList<>(); // If a template requires HVM and a host doesn't support HVM, remove it from consideration - List hostsToCheck = new ArrayList(); + List hostsToCheck = new ArrayList<>(); if (template.isRequiresHvm()) { for (Host host : hosts) { if (hostSupportsHVM(host)) { @@ -468,8 +466,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } // If a host is tagged with the same guest OS category as the template, move it to a high priority list // If a host is tagged with a different guest OS category than the template, move it to a low priority list - List highPriorityHosts = new ArrayList(); - List lowPriorityHosts = new ArrayList(); + List highPriorityHosts = new ArrayList<>(); + List lowPriorityHosts = new ArrayList<>(); for (Host host : hostsToCheck) { String hostGuestOSCategory = getHostGuestOSCategory(host); if (hostGuestOSCategory == null) { @@ -502,7 +500,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { // if service offering is not GPU enabled then move all the GPU enabled hosts to the end of priority list. if (_serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString()) == null) { - List gpuEnabledHosts = new ArrayList(); + List gpuEnabledHosts = new ArrayList<>(); // Check for GPU enabled hosts. 
for (Host host : prioritizedHosts) { if (_resourceMgr.isHostGpuEnabled(host.getId())) { diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java index 286bef7d39a..51b45a2dc98 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java @@ -26,6 +26,7 @@ import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -73,7 +74,7 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { public List allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo) { List hosts = super.allocateTo(vm, plan, type, avoid, returnUpTo); - if (hosts != null && !hosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(hosts)) { return hosts; } diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index 8460ac0d33f..4c4f08f12bd 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -800,7 +800,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] {CPUCapacityThreshold, MemoryCapacityThreshold, StorageAllocatedCapacityThreshold, StorageCapacityThreshold, AlertSmtpEnabledSecurityProtocols, - AlertSmtpUseStartTLS, Ipv6SubnetCapacityThreshold}; + AlertSmtpUseStartTLS, Ipv6SubnetCapacityThreshold, AlertSmtpUseAuth}; } @Override diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java 
b/server/src/main/java/com/cloud/api/ApiDBUtils.java index a30abada404..a169ebc0f19 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -30,6 +30,7 @@ import java.util.stream.Collectors; import javax.annotation.PostConstruct; import javax.inject.Inject; +import com.cloud.cpu.CPU; import org.apache.cloudstack.acl.Role; import org.apache.cloudstack.acl.RoleService; import org.apache.cloudstack.affinity.AffinityGroup; @@ -50,6 +51,7 @@ import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.DomainRouterResponse; import org.apache.cloudstack.api.response.EventResponse; +import org.apache.cloudstack.api.response.SharedFSResponse; import org.apache.cloudstack.api.response.HostForMigrationResponse; import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.HostTagResponse; @@ -93,6 +95,9 @@ import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao; import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.query.dao.SharedFSJoinDao; +import org.apache.cloudstack.storage.sharedfs.query.vo.SharedFSJoinVO; import com.cloud.agent.api.VgpuTypesInfo; import com.cloud.api.query.dao.AccountJoinDao; @@ -491,6 +496,8 @@ public class ApiDBUtils { static SnapshotPolicyDetailsDao s_snapshotPolicyDetailsDao; static ObjectStoreDao s_objectStoreDao; + static SharedFSJoinDao s_sharedFSJoinDao; + static BucketDao s_bucketDao; static VirtualMachineManager s_virtualMachineManager; @@ -758,6 +765,8 @@ public class ApiDBUtils { private BucketDao bucketDao; @Inject private VirtualMachineManager virtualMachineManager; + @Inject + 
private SharedFSJoinDao sharedFSJoinDao; @PostConstruct void init() { @@ -893,6 +902,7 @@ public class ApiDBUtils { s_objectStoreDao = objectStoreDao; s_bucketDao = bucketDao; s_virtualMachineManager = virtualMachineManager; + s_sharedFSJoinDao = sharedFSJoinDao; } // /////////////////////////////////////////////////////////// @@ -1103,8 +1113,8 @@ public class ApiDBUtils { return null; } - public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId) { - ServiceOfferingVO off = s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId); + public static ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(Long diskOfferingId, boolean includingRemoved) { + ServiceOfferingVO off = s_serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId, includingRemoved); return off; } public static DomainVO findDomainById(Long domainId) { @@ -2021,8 +2031,8 @@ public class ApiDBUtils { return s_volJoinDao.newVolumeView(vr); } - public static StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO vr) { - return s_poolJoinDao.newStoragePoolResponse(vr); + public static StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO vr, boolean customStats) { + return s_poolJoinDao.newStoragePoolResponse(vr, customStats); } public static StorageTagResponse newStorageTagResponse(StoragePoolTagVO vr) { @@ -2269,4 +2279,16 @@ public class ApiDBUtils { public static ObjectStoreResponse fillObjectStoreDetails(ObjectStoreResponse storeData, ObjectStoreVO store) { return s_objectStoreDao.setObjectStoreResponse(storeData, store); } + + public static SharedFSResponse newSharedFSResponse(ResponseView view, SharedFSJoinVO sharedFSView) { + return s_sharedFSJoinDao.newSharedFSResponse(view, sharedFSView); + } + + public static SharedFSJoinVO newSharedFSView(SharedFS sharedFS) { + return s_sharedFSJoinDao.newSharedFSView(sharedFS); + } + + public static List listZoneClustersArchs(long zoneId) { + return 
s_clusterDao.getClustersArchsByZone(zoneId); + } } diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 5a580cb86e4..810f0abd7e0 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -39,7 +39,13 @@ import java.util.stream.Collectors; import javax.inject.Inject; +import com.cloud.bgp.ASNumber; +import com.cloud.bgp.ASNumberRange; +import com.cloud.dc.ASNumberRangeVO; +import com.cloud.dc.ASNumberVO; import com.cloud.dc.VlanDetailsVO; +import com.cloud.dc.dao.ASNumberDao; +import com.cloud.dc.dao.ASNumberRangeDao; import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.BucketVO; @@ -61,13 +67,17 @@ import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.ApplicationLoadBalancerInstanceResponse; import org.apache.cloudstack.api.response.ApplicationLoadBalancerResponse; import org.apache.cloudstack.api.response.ApplicationLoadBalancerRuleResponse; +import org.apache.cloudstack.api.response.ASNRangeResponse; +import org.apache.cloudstack.api.response.ASNumberResponse; import org.apache.cloudstack.api.response.AsyncJobResponse; import org.apache.cloudstack.api.response.AutoScalePolicyResponse; import org.apache.cloudstack.api.response.AutoScaleVmGroupResponse; import org.apache.cloudstack.api.response.AutoScaleVmProfileResponse; import org.apache.cloudstack.api.response.BackupOfferingResponse; +import org.apache.cloudstack.api.response.BackupRepositoryResponse; import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.api.response.BackupScheduleResponse; +import org.apache.cloudstack.api.response.BgpPeerResponse; import org.apache.cloudstack.api.response.BucketResponse; import org.apache.cloudstack.api.response.CapabilityResponse; import 
org.apache.cloudstack.api.response.CapacityResponse; @@ -89,6 +99,7 @@ import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.DomainRouterResponse; import org.apache.cloudstack.api.response.EventResponse; import org.apache.cloudstack.api.response.ExtractResponse; +import org.apache.cloudstack.api.response.SharedFSResponse; import org.apache.cloudstack.api.response.FirewallResponse; import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.GlobalLoadBalancerResponse; @@ -108,6 +119,7 @@ import org.apache.cloudstack.api.response.InternalLoadBalancerElementResponse; import org.apache.cloudstack.api.response.IpForwardingRuleResponse; import org.apache.cloudstack.api.response.IpQuarantineResponse; import org.apache.cloudstack.api.response.IpRangeResponse; +import org.apache.cloudstack.api.response.Ipv4RouteResponse; import org.apache.cloudstack.api.response.Ipv6RouteResponse; import org.apache.cloudstack.api.response.IsolationMethodResponse; import org.apache.cloudstack.api.response.LBHealthCheckPolicyResponse; @@ -184,8 +196,10 @@ import org.apache.cloudstack.api.response.VpnUsersResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupOffering; +import org.apache.cloudstack.backup.BackupRepository; import org.apache.cloudstack.backup.BackupSchedule; import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.backup.dao.BackupRepositoryDao; import org.apache.cloudstack.config.Configuration; import org.apache.cloudstack.config.ConfigurationGroup; import org.apache.cloudstack.config.ConfigurationSubGroup; @@ -202,6 +216,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import 
org.apache.cloudstack.management.ManagementServerHost; +import org.apache.cloudstack.network.BgpPeerVO; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.apache.cloudstack.network.dao.BgpPeerDao; import org.apache.cloudstack.network.lb.ApplicationLoadBalancerRule; import org.apache.cloudstack.region.PortableIp; import org.apache.cloudstack.region.PortableIpRange; @@ -213,6 +230,8 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.query.vo.SharedFSJoinVO; import org.apache.cloudstack.storage.object.Bucket; import org.apache.cloudstack.storage.object.ObjectStore; import org.apache.cloudstack.usage.Usage; @@ -345,6 +364,7 @@ import com.cloud.network.vpc.StaticRoute; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcOffering; import com.cloud.network.vpc.VpcVO; +import com.cloud.network.vpc.dao.VpcOfferingDao; import com.cloud.offering.DiskOffering; import com.cloud.offering.NetworkOffering; import com.cloud.offering.NetworkOffering.Detail; @@ -372,7 +392,6 @@ import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.StoragePool; import com.cloud.storage.Upload; -import com.cloud.storage.UploadVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; @@ -487,9 +506,21 @@ public class ApiResponseHelper implements ResponseGenerator { UserDataDao userDataDao; @Inject VlanDetailsDao vlanDetailsDao; + @Inject + BackupRepositoryDao backupRepositoryDao; + @Inject + private ASNumberRangeDao asNumberRangeDao; + @Inject + private ASNumberDao asNumberDao; @Inject ObjectStoreDao _objectStoreDao; + @Inject + VpcOfferingDao 
vpcOfferingDao; + @Inject + BgpPeerDao bgpPeerDao; + @Inject + RoutedIpv4Manager routedIpv4Manager; @Override public UserResponse createUserResponse(User user) { @@ -669,6 +700,7 @@ public class ApiResponseHelper implements ResponseGenerator { snapshotResponse.setVolumeId(volume.getUuid()); snapshotResponse.setVolumeName(volume.getName()); snapshotResponse.setVolumeType(volume.getVolumeType().name()); + snapshotResponse.setVolumeState(volume.getState().name()); snapshotResponse.setVirtualSize(volume.getSize()); DataCenter zone = ApiDBUtils.findZoneById(volume.getDataCenterId()); if (zone != null) { @@ -795,6 +827,7 @@ public class ApiResponseHelper implements ResponseGenerator { if (domain != null) { vmSnapshotResponse.setDomainId(domain.getUuid()); vmSnapshotResponse.setDomainName(domain.getName()); + vmSnapshotResponse.setDomainPath(domain.getPath()); } List tags = _resourceTagDao.listBy(vmSnapshot.getId(), ResourceObjectType.VMSnapshot); @@ -1004,6 +1037,12 @@ public class ApiResponseHelper implements ResponseGenerator { if (!isAdmin) { return; } + try { + nic.getInstanceId(); + } catch (NullPointerException ex) { + return; + } + VirtualMachine vm = ApiDBUtils.findVMInstanceById(nic.getInstanceId()); if (vm == null) { return; @@ -1446,7 +1485,7 @@ public class ApiResponseHelper implements ResponseGenerator { @Override public StoragePoolResponse createStoragePoolResponse(StoragePool pool) { List viewPools = ApiDBUtils.newStoragePoolView(pool); - List listPools = ViewResponseHelper.createStoragePoolResponse(viewPools.toArray(new StoragePoolJoinVO[viewPools.size()])); + List listPools = ViewResponseHelper.createStoragePoolResponse(false, viewPools.toArray(new StoragePoolJoinVO[viewPools.size()])); assert listPools != null && listPools.size() == 1 : "There should be one storage pool returned"; return listPools.get(0); } @@ -1491,6 +1530,9 @@ public class ApiResponseHelper implements ResponseGenerator { clusterResponse.setCpuOvercommitRatio(cpuOvercommitRatio); 
clusterResponse.setMemoryOvercommitRatio(memoryOvercommitRatio); clusterResponse.setResourceDetails(_clusterDetailsDao.findDetails(cluster.getId())); + if (cluster.getArch() != null) { + clusterResponse.setArch(cluster.getArch().getType()); + } if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId()); @@ -1518,7 +1560,7 @@ public class ApiResponseHelper implements ResponseGenerator { } // Do it for stats as well. capacityResponses.addAll(getStatsCapacityresponse(null, cluster.getId(), pod.getId(), pod.getDataCenterId())); - clusterResponse.setCapacitites(new ArrayList(capacityResponses)); + clusterResponse.setCapacities(new ArrayList(capacityResponses)); } clusterResponse.setHasAnnotation(annotationDao.hasAnnotations(cluster.getUuid(), AnnotationService.EntityType.CLUSTER.name(), _accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId()))); @@ -1907,58 +1949,49 @@ public class ApiResponseHelper implements ResponseGenerator { return listSgs.get(0); } - //TODO: we need to deprecate uploadVO, since extract is done in a synchronous fashion - @Override - public ExtractResponse createExtractResponse(Long id, Long zoneId, Long accountId, String mode, String url) { - + private ExtractResponse createExtractResponse (Long zoneId, Long accountId, String url) { ExtractResponse response = new ExtractResponse(); - response.setObjectName("template"); - VMTemplateVO template = ApiDBUtils.findTemplateById(id); - response.setId(template.getUuid()); - response.setName(template.getName()); if (zoneId != null) { DataCenter zone = ApiDBUtils.findZoneById(zoneId); response.setZoneId(zone.getUuid()); response.setZoneName(zone.getName()); } - response.setMode(mode); response.setUrl(url); response.setState(Upload.Status.DOWNLOAD_URL_CREATED.toString()); Account account = ApiDBUtils.findAccountById(accountId); response.setAccountId(account.getUuid()); - return response; } @Override - public 
ExtractResponse createExtractResponse(Long uploadId, Long id, Long zoneId, Long accountId, String mode, String url) { + public ExtractResponse createVolumeExtractResponse(Long id, Long zoneId, Long accountId, String mode, String url) { + ExtractResponse response = createExtractResponse(zoneId, accountId, url); + response.setObjectName("volume"); + response.setMode(mode); + Volume volume = ApiDBUtils.findVolumeById(id); + response.setId(volume.getUuid()); + response.setName(volume.getName()); + return response; + } - ExtractResponse response = new ExtractResponse(); - response.setObjectName("template"); + @Override + public ExtractResponse createSnapshotExtractResponse(Long id, Long zoneId, Long accountId, String url) { + ExtractResponse response = createExtractResponse(zoneId, accountId, url); + response.setObjectName("snapshot"); + Snapshot snapshot = ApiDBUtils.findSnapshotById(id); + response.setId(snapshot.getUuid()); + response.setName(snapshot.getName()); + return response; + } + + @Override + public ExtractResponse createImageExtractResponse(Long id, Long zoneId, Long accountId, String mode, String url) { + ExtractResponse response = createExtractResponse(zoneId, accountId, url); + response.setMode(mode); VMTemplateVO template = ApiDBUtils.findTemplateById(id); response.setId(template.getUuid()); response.setName(template.getName()); - if (zoneId != null) { - DataCenter zone = ApiDBUtils.findZoneById(zoneId); - response.setZoneId(zone.getUuid()); - response.setZoneName(zone.getName()); - } - response.setMode(mode); - if (uploadId == null) { - // region-wide image store - response.setUrl(url); - response.setState(Upload.Status.DOWNLOAD_URL_CREATED.toString()); - } else { - UploadVO uploadInfo = ApiDBUtils.findUploadById(uploadId); - response.setUploadId(uploadInfo.getUuid()); - response.setState(uploadInfo.getUploadState().toString()); - response.setUrl(uploadInfo.getUploadUrl()); - } - Account account = ApiDBUtils.findAccountById(accountId); - 
response.setAccountId(account.getUuid()); - return response; - } @Override @@ -2246,6 +2279,7 @@ public class ApiResponseHelper implements ResponseGenerator { response.setDomainId(securityGroup.getDomainUuid()); response.setDomainName(securityGroup.getDomainName()); + response.setDomainPath(securityGroup.getDomainPath()); for (SecurityRule securityRule : securityRules) { SecurityGroupRuleResponse securityGroupData = new SecurityGroupRuleResponse(); @@ -2382,7 +2416,9 @@ public class ApiResponseHelper implements ResponseGenerator { response.setForVpc(_configMgr.isOfferingForVpc(offering)); response.setForTungsten(offering.isForTungsten()); response.setForNsx(offering.isForNsx()); - response.setNsxMode(offering.getNsxMode()); + if (offering.getNetworkMode() != null) { + response.setNetworkMode(offering.getNetworkMode().name()); + } response.setServices(serviceResponses); //set network offering details Map details = _ntwkModel.getNtwkOffDetails(offering.getId()); @@ -2485,7 +2521,6 @@ public class ApiResponseHelper implements ResponseGenerator { } } response.setReservedIpRange(reservation); - // return vlan information only to Root admin if (network.getBroadcastUri() != null && view == ResponseView.Full) { String broadcastUri = network.getBroadcastUri().toString(); @@ -2533,6 +2568,13 @@ public class ApiResponseHelper implements ResponseGenerator { if (Network.GuestType.Isolated.equals(network.getGuestType()) && network.getVpcId() == null) { response.setEgressDefaultPolicy(networkOffering.isEgressDefaultPolicy()); } + ASNumberVO asNumberVO = networkOffering.isForVpc() ? 
+ asNumberDao.findByZoneAndVpcId(network.getDataCenterId(), network.getVpcId()) : + asNumberDao.findByZoneAndNetworkId(network.getDataCenterId(), network.getId()); + if (Objects.nonNull(asNumberVO)) { + response.setAsNumberId(asNumberVO.getUuid()); + response.setAsNumber(asNumberVO.getAsNumber()); + } } if (network.getAclType() != null) { @@ -2650,6 +2692,7 @@ public class ApiResponseHelper implements ResponseGenerator { if (domain != null) { response.setDomainId(domain.getUuid()); response.setDomainName(domain.getName()); + response.setDomainPath(domain.getPath()); } } @@ -2712,6 +2755,29 @@ public class ApiResponseHelper implements ResponseGenerator { response.setBytesReceived(bytesReceived); response.setBytesSent(bytesSent); + if (networkOfferingDao.isRoutedNetwork(network.getNetworkOfferingId())) { + if (routedIpv4Manager.isDynamicRoutedNetwork(network)) { + response.setIpv4Routing(Network.Routing.Dynamic.name()); + } else { + response.setIpv4Routing(Network.Routing.Static.name()); + } + response.setIpv4Routes(new LinkedHashSet<>()); + List ips = network.getVpcId() != null ? 
userIpAddressDao.listByAssociatedVpc(network.getVpcId(), true): + userIpAddressDao.listByAssociatedNetwork(network.getId(), true); + for (IpAddress ip : ips) { + Ipv4RouteResponse route = new Ipv4RouteResponse(network.getCidr(), ip.getAddress().addr()); + response.addIpv4Route(route); + } + + if (view == ResponseView.Full) { + List bgpPeerVOS = bgpPeerDao.listNonRevokeByNetworkId(network.getId()); + for (BgpPeerVO bgpPeerVO : bgpPeerVOS) { + BgpPeerResponse bgpPeerResponse = routedIpv4Manager.createBgpPeerResponse(bgpPeerVO); + response.addBgpPeer(bgpPeerResponse); + } + } + } + if (networkOfferingDao.isIpv6Supported(network.getNetworkOfferingId())) { response.setInternetProtocol(networkOfferingDao.getNetworkOfferingInternetProtocol(network.getNetworkOfferingId(), NetUtils.InternetProtocol.IPv4).toString()); response.setIpv6Routing(Network.Routing.Static.toString()); @@ -2776,19 +2842,19 @@ public class ApiResponseHelper implements ResponseGenerator { List cidrs = ApiDBUtils.findFirewallSourceCidrs(fwRule.getId()); response.setCidrList(StringUtils.join(cidrs, ",")); - if(fwRule.getTrafficType() == FirewallRule.TrafficType.Egress){ - List destCidrs = ApiDBUtils.findFirewallDestCidrs(fwRule.getId()); - response.setDestCidr(StringUtils.join(destCidrs,",")); - } + List destCidrs = ApiDBUtils.findFirewallDestCidrs(fwRule.getId()); + response.setDestCidr(StringUtils.join(destCidrs,",")); if (fwRule.getTrafficType() == FirewallRule.TrafficType.Ingress) { - IpAddress ip = ApiDBUtils.findIpAddressById(fwRule.getSourceIpAddressId()); - response.setPublicIpAddressId(ip.getUuid()); - response.setPublicIpAddress(ip.getAddress().addr()); + if (fwRule.getSourceIpAddressId() != null) { + IpAddress ip = ApiDBUtils.findIpAddressById(fwRule.getSourceIpAddressId()); + response.setPublicIpAddressId(ip.getUuid()); + response.setPublicIpAddress(ip.getAddress().addr()); + } } - Network network = ApiDBUtils.findNetworkById(fwRule.getNetworkId()); - response.setNetworkId(network.getUuid()); 
+ Network network = ApiDBUtils.findNetworkById(fwRule.getNetworkId()); + response.setNetworkId(network.getUuid()); FirewallRule.State state = fwRule.getState(); String stateToSet = state.toString(); @@ -2799,6 +2865,7 @@ public class ApiResponseHelper implements ResponseGenerator { response.setIcmpCode(fwRule.getIcmpCode()); response.setIcmpType(fwRule.getIcmpType()); response.setForDisplay(fwRule.isDisplay()); + response.setTrafficType(fwRule.getTrafficType().toString()); // set tag information List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.FirewallRule, fwRule.getId()); @@ -2898,6 +2965,7 @@ public class ApiResponseHelper implements ResponseGenerator { Domain domain = ApiDBUtils.findDomainById(object.getDomainId()); response.setDomainId(domain.getUuid()); response.setDomainName(domain.getName()); + response.setDomainPath(domain.getPath()); } private void populateOwner(ControlledViewEntityResponse response, ControlledEntity object) { @@ -2915,6 +2983,7 @@ public class ApiResponseHelper implements ResponseGenerator { Domain domain = ApiDBUtils.findDomainById(object.getDomainId()); response.setDomainId(domain.getUuid()); response.setDomainName(domain.getName()); + response.setDomainPath(domain.getPath()); } public static void populateOwner(ControlledViewEntityResponse response, ControlledViewEntity object) { @@ -2928,6 +2997,7 @@ public class ApiResponseHelper implements ResponseGenerator { response.setDomainId(object.getDomainUuid()); response.setDomainName(object.getDomainName()); + response.setDomainPath(object.getDomainPath()); } private void populateAccount(ControlledEntityResponse response, long accountId) { @@ -2954,6 +3024,7 @@ public class ApiResponseHelper implements ResponseGenerator { response.setDomainId(domain.getUuid()); response.setDomainName(domain.getName()); + response.setDomainPath(domain.getPath()); } @Override @@ -3378,7 +3449,11 @@ public class ApiResponseHelper implements ResponseGenerator { 
response.setUsesDistributedRouter(vpc.usesDistributedRouter()); response.setRedundantRouter(vpc.isRedundant()); response.setRegionLevelVpc(vpc.isRegionLevelVpc()); - + ASNumberVO asNumberVO = asNumberDao.findByZoneAndVpcId(vpc.getZoneId(), vpc.getId()); + if (Objects.nonNull(asNumberVO)) { + response.setAsNumberId(asNumberVO.getUuid()); + response.setAsNumber(asNumberVO.getAsNumber()); + } Map> serviceProviderMap = ApiDBUtils.listVpcOffServices(vpc.getVpcOfferingId()); List serviceResponses = new ArrayList(); for (Map.Entry>entry : serviceProviderMap.entrySet()) { @@ -3436,6 +3511,31 @@ public class ApiResponseHelper implements ResponseGenerator { response.setDns2(vpc.getIp4Dns2()); response.setIpv6Dns1(vpc.getIp6Dns1()); response.setIpv6Dns2(vpc.getIp6Dns2()); + + // add IPv4 routes + if (vpcOfferingDao.isRoutedVpc(vpc.getVpcOfferingId())) { + if (Objects.nonNull(asNumberVO)) { + response.setIpv4Routing(Network.Routing.Dynamic.name()); + } else { + response.setIpv4Routing(Network.Routing.Static.name()); + } + response.setIpv4Routes(new LinkedHashSet<>()); + List ips = userIpAddressDao.listByAssociatedVpc(vpc.getId(), true); + for (Network network : networkDao.listByVpc(vpc.getId())) { + for (IPAddressVO ip : ips) { + Ipv4RouteResponse route = new Ipv4RouteResponse(network.getCidr(), ip.getAddress().addr()); + response.addIpv4Route(route); + } + } + if (view == ResponseView.Full) { + List bgpPeerVOS = bgpPeerDao.listNonRevokeByVpcId(vpc.getId()); + for (BgpPeerVO bgpPeerVO : bgpPeerVOS) { + BgpPeerResponse bgpPeerResponse = routedIpv4Manager.createBgpPeerResponse(bgpPeerVO); + response.addBgpPeer(bgpPeerResponse); + } + } + } + response.setObjectName("vpc"); return response; } @@ -3892,6 +3992,7 @@ public class ApiResponseHelper implements ResponseGenerator { if (domain != null) { usageRecResponse.setDomainId(domain.getUuid()); usageRecResponse.setDomainName(domain.getName()); + usageRecResponse.setDomainPath(domain.getPath()); } if (usageRecord.getZoneId() != 
null) { @@ -4689,6 +4790,7 @@ public class ApiResponseHelper implements ResponseGenerator { if (domain != null) { response.setDomainId(domain.getUuid()); response.setDomainName(domain.getName()); + response.setDomainPath(domain.getPath()); } response.setObjectName("affinitygroup"); @@ -5156,7 +5258,7 @@ public class ApiResponseHelper implements ResponseGenerator { } else if (instance.getHostName() != null) { response.setHostName(instance.getHostName()); } - response.setPowerState(instance.getPowerState().toString()); + response.setPowerState((instance.getPowerState() != null)? instance.getPowerState().toString() : UnmanagedInstanceTO.PowerState.PowerUnknown.toString()); response.setCpuCores(instance.getCpuCores()); response.setCpuSpeed(instance.getCpuSpeed()); response.setCpuCoresPerSocket(instance.getCpuCoresPerSocket()); @@ -5274,4 +5376,83 @@ public class ApiResponseHelper implements ResponseGenerator { populateAccount(bucketResponse, bucket.getAccountId()); return bucketResponse; } + + @Override + public ASNRangeResponse createASNumberRangeResponse(ASNumberRange asnRange) { + ASNRangeResponse response = new ASNRangeResponse(); + response.setId(asnRange.getUuid()); + DataCenterVO zone = ApiDBUtils.findZoneById(asnRange.getDataCenterId()); + if (zone != null) { + response.setZoneId(zone.getUuid()); + } + response.setStartASNumber(asnRange.getStartASNumber()); + response.setEndASNumber(asnRange.getEndASNumber()); + response.setCreated(asnRange.getCreated()); + response.setObjectName("asnumberrange"); + return response; + } + + @Override + public ASNumberResponse createASNumberResponse(ASNumber asn) { + ASNumberResponse response = new ASNumberResponse(); + response.setId(asn.getUuid()); + if (asn.getAccountId() != null) { + Account account = ApiDBUtils.findAccountById(asn.getAccountId()); + response.setAccountId(account.getUuid()); + response.setAccountName(account.getAccountName()); + } + if (asn.getDomainId() != null) { + DomainVO domain = 
ApiDBUtils.findDomainById(asn.getDomainId()); + response.setDomainId(domain.getUuid()); + response.setDomainName(domain.getName()); + } + DataCenterVO zone = ApiDBUtils.findZoneById(asn.getDataCenterId()); + response.setZoneId(zone.getUuid()); + response.setZoneName(zone.getName()); + response.setAsNumber(asn.getAsNumber()); + ASNumberRangeVO range = asNumberRangeDao.findById(asn.getAsNumberRangeId()); + response.setAsNumberRangeId(range.getUuid()); + String rangeText = String.format("%s-%s", range.getStartASNumber(), range.getEndASNumber()); + response.setAsNumberRange(rangeText); + response.setAllocated(asn.getAllocatedTime()); + response.setAllocationState(asn.isAllocated() ? "Allocated" : "Free"); + if (asn.getVpcId() != null) { + VpcVO vpc = ApiDBUtils.findVpcById(asn.getVpcId()); + response.setVpcId(vpc.getUuid()); + response.setVpcName(vpc.getName()); + } else if (asn.getNetworkId() != null) { + NetworkVO network = ApiDBUtils.findNetworkById(asn.getNetworkId()); + response.setAssociatedNetworkId(network.getUuid()); + response.setAssociatedNetworkName(network.getName()); + } + response.setCreated(asn.getCreated()); + response.setObjectName("asnumber"); + return response; + } + + @Override + public BackupRepositoryResponse createBackupRepositoryResponse(BackupRepository backupRepository) { + BackupRepositoryResponse response = new BackupRepositoryResponse(); + response.setName(backupRepository.getName()); + response.setId(backupRepository.getUuid()); + response.setCreated(backupRepository.getCreated()); + response.setAddress(backupRepository.getAddress()); + response.setProviderName(backupRepository.getProvider()); + response.setType(backupRepository.getType()); + response.setMountOptions(backupRepository.getMountOptions()); + response.setCapacityBytes(backupRepository.getCapacityBytes()); + response.setObjectName("backuprepository"); + DataCenter zone = ApiDBUtils.findZoneById(backupRepository.getZoneId()); + if (zone != null) { + 
response.setZoneId(zone.getUuid()); + response.setZoneName(zone.getName()); + } + return response; + } + + @Override + public SharedFSResponse createSharedFSResponse(ResponseView view, SharedFS sharedFS) { + SharedFSJoinVO sharedFSView = ApiDBUtils.newSharedFSView(sharedFS); + return ApiDBUtils.newSharedFSResponse(view, sharedFSView); + } } diff --git a/server/src/main/java/com/cloud/api/ApiServer.java b/server/src/main/java/com/cloud/api/ApiServer.java index fd57b43080a..739ad765afa 100644 --- a/server/src/main/java/com/cloud/api/ApiServer.java +++ b/server/src/main/java/com/cloud/api/ApiServer.java @@ -55,6 +55,13 @@ import javax.naming.ConfigurationException; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountManagerImpl; +import com.cloud.user.DomainManager; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.user.UserVO; import org.apache.cloudstack.acl.APIChecker; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -95,8 +102,7 @@ import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.cloudstack.framework.events.EventBus; -import org.apache.cloudstack.framework.events.EventBusException; +import org.apache.cloudstack.framework.events.EventDistributor; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; @@ -104,7 +110,9 @@ import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageDispatcher; import org.apache.cloudstack.framework.messagebus.MessageHandler; import 
org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.user.UserPasswordResetManager; import org.apache.commons.codec.binary.Base64; +import org.apache.commons.lang3.EnumUtils; import org.apache.http.ConnectionClosedException; import org.apache.http.HttpException; import org.apache.http.HttpRequest; @@ -132,10 +140,9 @@ import org.apache.http.protocol.ResponseConnControl; import org.apache.http.protocol.ResponseContent; import org.apache.http.protocol.ResponseDate; import org.apache.http.protocol.ResponseServer; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.bouncycastle.jce.provider.BouncyCastleProvider; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; import com.cloud.api.dispatch.DispatchChainFactory; @@ -159,13 +166,6 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.UnavailableCommandException; import com.cloud.projects.dao.ProjectDao; import com.cloud.storage.VolumeApiService; -import com.cloud.user.Account; -import com.cloud.user.AccountManager; -import com.cloud.user.AccountManagerImpl; -import com.cloud.user.DomainManager; -import com.cloud.user.User; -import com.cloud.user.UserAccount; -import com.cloud.user.UserVO; import com.cloud.utils.ConstantTimeComparator; import com.cloud.utils.DateUtil; import com.cloud.utils.HttpUtils; @@ -184,6 +184,8 @@ import com.cloud.utils.exception.ExceptionProxyObject; import com.cloud.utils.net.NetUtils; import com.google.gson.reflect.TypeToken; +import static org.apache.cloudstack.user.UserPasswordResetManager.UserPasswordResetEnabled; + @Component public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiServerService, Configurable { @@ -196,26 +198,28 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer */ private static final String 
CONTROL_CHARACTERS = "[\000-\011\013-\014\016-\037\177]"; + @Inject + private AccountManager accountMgr; + @Inject + private APIAuthenticationManager authManager; @Inject private ApiDispatcher dispatcher; @Inject - private DispatchChainFactory dispatchChainFactory; + private AsyncJobManager asyncMgr; @Inject - private AccountManager accountMgr; + private DispatchChainFactory dispatchChainFactory; @Inject private DomainManager domainMgr; @Inject private DomainDao domainDao; @Inject - private UUIDManager uuidMgr; - @Inject - private AsyncJobManager asyncMgr; - @Inject private EntityManager entityMgr; @Inject - private APIAuthenticationManager authManager; - @Inject private ProjectDao projectDao; + @Inject + private UUIDManager uuidMgr; + @Inject + private UserPasswordResetManager userPasswordResetManager; private List pluggableServices; @@ -224,6 +228,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer @Inject private ApiAsyncJobDispatcher asyncDispatcher; + private EventDistributor eventDistributor = null; private static int s_workerCount = 0; private static Map>> s_apiNameCmdClassMap = new HashMap>>(); @@ -311,6 +316,10 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer return true; } + public void setEventDistributor(EventDistributor eventDistributor) { + this.eventDistributor = eventDistributor; + } + @MessageHandler(topic = AsyncJob.Topics.JOB_EVENT_PUBLISH) public void handleAsyncJobPublishEvent(String subject, String senderAddress, Object args) { assert (args != null); @@ -322,12 +331,8 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer if (logger.isTraceEnabled()) logger.trace("Handle asyjob publish event " + jobEvent); - - EventBus eventBus = null; - try { - eventBus = ComponentContext.getComponent(EventBus.class); - } catch (NoSuchBeanDefinitionException nbe) { - return; // no provider is configured to provide events bus, so just return + if (eventDistributor 
== null) { + setEventDistributor(ComponentContext.getComponent(EventDistributor.class)); } if (!job.getDispatcher().equalsIgnoreCase("ApiAsyncJobDispatcher")) { @@ -340,7 +345,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer // Get the event type from the cmdInfo json string String info = job.getCmdInfo(); String cmdEventType = "unknown"; - Map cmdInfoObj = new HashMap(); + Map cmdInfoObj = new HashMap<>(); if (info != null) { Type type = new TypeToken>(){}.getType(); Map cmdInfo = ApiGsonHelper.getBuilder().create().fromJson(info, type); @@ -368,7 +373,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer org.apache.cloudstack.framework.events.Event event = new org.apache.cloudstack.framework.events.Event("management-server", EventCategory.ASYNC_JOB_CHANGE_EVENT.getName(), jobEvent, instanceType, instanceUuid); - Map eventDescription = new HashMap(); + Map eventDescription = new HashMap<>(); eventDescription.put("command", job.getCmd()); eventDescription.put("user", userJobOwner.getUuid()); eventDescription.put("account", jobOwner.getUuid()); @@ -389,13 +394,18 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer eventDescription.put("domainname", domain.getName()); } event.setDescription(eventDescription); + eventDistributor.publish(event); + } - try { - eventBus.publish(event); - } catch (EventBusException evx) { - String errMsg = "Failed to publish async job event on the event bus."; - logger.warn(errMsg, evx); + protected void setupIntegrationPortListener(Integer apiPort) { + if (apiPort == null || apiPort <= 0) { + logger.trace(String.format("Skipping setting up listener for integration port as %s is set to %d", + IntegrationAPIPort.key(), apiPort)); + return; } + logger.debug(String.format("Setting up integration API service listener on port: %d", apiPort)); + final ListenerThread listenerThread = new ListenerThread(this, apiPort); + listenerThread.start(); } 
@Override @@ -443,10 +453,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer setEncodeApiResponse(EncodeApiResponse.value()); - if (apiPort != null) { - final ListenerThread listenerThread = new ListenerThread(this, apiPort); - listenerThread.start(); - } + setupIntegrationPortListener(apiPort); return true; } @@ -1222,6 +1229,57 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer return true; } + @Override + public boolean forgotPassword(UserAccount userAccount, Domain domain) { + if (!UserPasswordResetEnabled.value()) { + String errorMessage = String.format("%s is false. Password reset for the user is not allowed.", + UserPasswordResetEnabled.key()); + logger.error(errorMessage); + throw new CloudRuntimeException(errorMessage); + } + if (StringUtils.isBlank(userAccount.getEmail())) { + logger.error(String.format( + "Email is not set. username: %s account id: %d domain id: %d", + userAccount.getUsername(), userAccount.getAccountId(), userAccount.getDomainId())); + throw new CloudRuntimeException("Email is not set for the user."); + } + + if (!EnumUtils.getEnumIgnoreCase(Account.State.class, userAccount.getState()).equals(Account.State.ENABLED)) { + logger.error(String.format( + "User is not enabled. username: %s account id: %d domain id: %s", + userAccount.getUsername(), userAccount.getAccountId(), domain.getUuid())); + throw new CloudRuntimeException("User is not enabled."); + } + + if (!EnumUtils.getEnumIgnoreCase(Account.State.class, userAccount.getAccountState()).equals(Account.State.ENABLED)) { + logger.error(String.format( + "Account is not enabled. username: %s account id: %d domain id: %s", + userAccount.getUsername(), userAccount.getAccountId(), domain.getUuid())); + throw new CloudRuntimeException("Account is not enabled."); + } + + if (!domain.getState().equals(Domain.State.Active)) { + logger.error(String.format( + "Domain is not active. 
username: %s account id: %d domain id: %s", + userAccount.getUsername(), userAccount.getAccountId(), domain.getUuid())); + throw new CloudRuntimeException("Domain is not active."); + } + + userPasswordResetManager.setResetTokenAndSend(userAccount); + return true; + } + + @Override + public boolean resetPassword(UserAccount userAccount, String token, String password) { + if (!UserPasswordResetEnabled.value()) { + String errorMessage = String.format("%s is false. Password reset for the user is not allowed.", + UserPasswordResetEnabled.key()); + logger.error(errorMessage); + throw new CloudRuntimeException(errorMessage); + } + return userPasswordResetManager.validateAndResetPassword(userAccount, token, password); + } + private void checkCommandAvailable(final User user, final String commandName, final InetAddress remoteAddress) throws PermissionDeniedException { if (user == null) { throw new PermissionDeniedException("User is null for role based API access check for command" + commandName); diff --git a/server/src/main/java/com/cloud/api/auth/APIAuthenticationManagerImpl.java b/server/src/main/java/com/cloud/api/auth/APIAuthenticationManagerImpl.java index 907ef088ee8..3c8282d0280 100644 --- a/server/src/main/java/com/cloud/api/auth/APIAuthenticationManagerImpl.java +++ b/server/src/main/java/com/cloud/api/auth/APIAuthenticationManagerImpl.java @@ -31,6 +31,8 @@ import org.apache.cloudstack.api.auth.PluggableAPIAuthenticator; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; +import static org.apache.cloudstack.user.UserPasswordResetManager.UserPasswordResetEnabled; + @SuppressWarnings("unchecked") public class APIAuthenticationManagerImpl extends ManagerBase implements APIAuthenticationManager { @@ -75,6 +77,10 @@ public class APIAuthenticationManagerImpl extends ManagerBase implements APIAuth List> cmdList = new ArrayList>(); cmdList.add(DefaultLoginAPIAuthenticatorCmd.class); 
cmdList.add(DefaultLogoutAPIAuthenticatorCmd.class); + if (UserPasswordResetEnabled.value()) { + cmdList.add(DefaultForgotPasswordAPIAuthenticatorCmd.class); + cmdList.add(DefaultResetPasswordAPIAuthenticatorCmd.class); + } cmdList.add(ListUserTwoFactorAuthenticatorProvidersCmd.class); cmdList.add(ValidateUserTwoFactorAuthenticationCodeCmd.class); diff --git a/server/src/main/java/com/cloud/api/auth/DefaultForgotPasswordAPIAuthenticatorCmd.java b/server/src/main/java/com/cloud/api/auth/DefaultForgotPasswordAPIAuthenticatorCmd.java new file mode 100644 index 00000000000..1e90b43c5e8 --- /dev/null +++ b/server/src/main/java/com/cloud/api/auth/DefaultForgotPasswordAPIAuthenticatorCmd.java @@ -0,0 +1,165 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.api.auth; + +import com.cloud.api.ApiServlet; +import com.cloud.api.response.ApiResponseSerializer; +import com.cloud.domain.Domain; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.ApiServerService; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.auth.APIAuthenticationType; +import org.apache.cloudstack.api.auth.APIAuthenticator; +import org.apache.cloudstack.api.auth.PluggableAPIAuthenticator; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.jetbrains.annotations.Nullable; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; +import java.net.InetAddress; +import java.util.List; +import java.util.Map; + +@APICommand(name = "forgotPassword", + description = "Sends an email to the user with a token to reset the password using resetPassword command.", + since = "4.20.0.0", + requestHasSensitiveInfo = true, + responseObject = SuccessResponse.class) +public class DefaultForgotPasswordAPIAuthenticatorCmd extends BaseCmd implements APIAuthenticator { + + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.USERNAME, type = CommandType.STRING, description = "Username", required = true) + private String username; + + @Parameter(name = ApiConstants.DOMAIN, type = CommandType.STRING, description = "Path of the domain that the user belongs to. 
Example: domain=/com/cloud/internal. If no domain is passed in, the ROOT (/) domain is assumed.") + private String domain; + + @Inject + ApiServerService _apiServer; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getUsername() { + return username; + } + + public String getDomainName() { + return domain; + } + + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + return Account.Type.NORMAL.ordinal(); + } + + @Override + public void execute() throws ServerApiException { + // We should never reach here + throw new ServerApiException(ApiErrorCode.METHOD_NOT_ALLOWED, "This is an authentication api, cannot be used directly"); + } + + @Override + public String authenticate(String command, Map params, HttpSession session, InetAddress remoteAddress, String responseType, StringBuilder auditTrailSb, final HttpServletRequest req, final HttpServletResponse resp) throws ServerApiException { + final String[] username = (String[])params.get(ApiConstants.USERNAME); + final String[] domainName = (String[])params.get(ApiConstants.DOMAIN); + + Long domainId = null; + String domain = null; + domain = getDomainName(auditTrailSb, domainName, domain); + + String serializedResponse = null; + if (username != null) { + try { + final Domain userDomain = _domainService.findDomainByPath(domain); + if (userDomain != null) { + domainId = userDomain.getId(); + } else { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Unable to find the domain from the path %s", domain)); + } + final UserAccount userAccount = _accountService.getActiveUserAccount(username[0], domainId); + if (userAccount != null && List.of(User.Source.SAML2, User.Source.OAUTH2, 
User.Source.LDAP).contains(userAccount.getSource())) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Forgot Password is not allowed for this user"); + } + boolean success = _apiServer.forgotPassword(userAccount, userDomain); + logger.debug("Forgot password request for user " + username[0] + " in domain " + domain + " is successful: " + success); + } catch (final CloudRuntimeException ex) { + ApiServlet.invalidateHttpSession(session, "fall through to API key,"); + String msg = String.format("%s", ex.getMessage() != null ? + ex.getMessage() : + "forgot password request failed for user, check if username/domain are correct"); + auditTrailSb.append(" " + ApiErrorCode.ACCOUNT_ERROR + " " + msg); + serializedResponse = _apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), msg, params, responseType); + if (logger.isTraceEnabled()) { + logger.trace(msg); + } + } + SuccessResponse successResponse = new SuccessResponse(); + successResponse.setSuccess(true); + successResponse.setResponseName(getCommandName()); + return ApiResponseSerializer.toSerializedString(successResponse, responseType); + } + // We should not reach here and if we do we throw an exception + throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, serializedResponse); + } + + @Nullable + private String getDomainName(StringBuilder auditTrailSb, String[] domainName, String domain) { + if (domainName != null) { + domain = domainName[0]; + auditTrailSb.append(" domain=" + domain); + if (domain != null) { + // ensure domain starts with '/' and ends with '/' + if (!domain.endsWith("/")) { + domain += '/'; + } + if (!domain.startsWith("/")) { + domain = "/" + domain; + } + } + } + return domain; + } + + @Override + public APIAuthenticationType getAPIType() { + return APIAuthenticationType.PASSWORD_RESET; + } + + @Override + public void setAuthenticators(List authenticators) { + } +} diff --git a/server/src/main/java/com/cloud/api/auth/DefaultResetPasswordAPIAuthenticatorCmd.java 
b/server/src/main/java/com/cloud/api/auth/DefaultResetPasswordAPIAuthenticatorCmd.java new file mode 100644 index 00000000000..077efdee087 --- /dev/null +++ b/server/src/main/java/com/cloud/api/auth/DefaultResetPasswordAPIAuthenticatorCmd.java @@ -0,0 +1,193 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.api.auth; + +import com.cloud.api.ApiServlet; +import com.cloud.api.response.ApiResponseSerializer; +import com.cloud.domain.Domain; +import com.cloud.exception.CloudAuthenticationException; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.utils.UuidUtils; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.ApiServerService; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.auth.APIAuthenticationType; +import org.apache.cloudstack.api.auth.APIAuthenticator; +import org.apache.cloudstack.api.auth.PluggableAPIAuthenticator; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.jetbrains.annotations.Nullable; + +import javax.inject.Inject; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; +import java.net.InetAddress; +import java.util.List; +import java.util.Map; + +@APICommand(name = "resetPassword", + description = "Resets the password for the user using the token generated via forgotPassword command.", + since = "4.20.0.0", + requestHasSensitiveInfo = true, + responseObject = SuccessResponse.class) +public class DefaultResetPasswordAPIAuthenticatorCmd extends BaseCmd implements APIAuthenticator { + + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.USERNAME, + type = CommandType.STRING, + description = "Username", required = true) + private String username; + + @Parameter(name = ApiConstants.DOMAIN, + type = CommandType.STRING, + 
description = "Path of the domain that the user belongs to. Example: domain=/com/cloud/internal. If no domain is passed in, the ROOT (/) domain is assumed.") + private String domain; + + @Parameter(name = ApiConstants.TOKEN, + type = CommandType.STRING, + required = true, + description = "Token generated via forgotPassword command.") + private String token; + + @Parameter(name = ApiConstants.PASSWORD, + type = CommandType.STRING, + required = true, + description = "New password in clear text (Default hashed to SHA256SALT).") + private String password; + + @Inject + ApiServerService _apiServer; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getUsername() { + return username; + } + + public String getDomainName() { + return domain; + } + + public String getToken() { + return token; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public long getEntityOwnerId() { + return Account.Type.NORMAL.ordinal(); + } + + @Override + public void execute() throws ServerApiException { + // We should never reach here + throw new ServerApiException(ApiErrorCode.METHOD_NOT_ALLOWED, "This is an authentication api, cannot be used directly"); + } + + @Override + public String authenticate(String command, Map params, HttpSession session, InetAddress remoteAddress, String responseType, StringBuilder auditTrailSb, final HttpServletRequest req, final HttpServletResponse resp) throws ServerApiException { + final String[] username = (String[])params.get(ApiConstants.USERNAME); + final String[] password = (String[])params.get(ApiConstants.PASSWORD); + final String[] domainName = (String[])params.get(ApiConstants.DOMAIN); + final String[] token = (String[])params.get(ApiConstants.TOKEN); + + Long domainId = null; + 
String domain = null; + domain = getDomainName(auditTrailSb, domainName, domain); + + String serializedResponse = null; + + if (!UuidUtils.isUuid(token[0])) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Invalid token"); + } + + if (username != null) { + final String pwd = ((password == null) ? null : password[0]); + try { + final Domain userDomain = _domainService.findDomainByPath(domain); + if (userDomain != null) { + domainId = userDomain.getId(); + } else { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Unable to find the domain from the path %s", domain)); + } + final UserAccount userAccount = _accountService.getActiveUserAccount(username[0], domainId); + if (userAccount != null && List.of(User.Source.SAML2, User.Source.OAUTH2, User.Source.LDAP).contains(userAccount.getSource())) { + throw new CloudAuthenticationException("Password reset is not allowed for CloudStack login"); + } + boolean success = _apiServer.resetPassword(userAccount, token[0], pwd); + SuccessResponse successResponse = new SuccessResponse(); + successResponse.setSuccess(success); + successResponse.setResponseName(getCommandName()); + return ApiResponseSerializer.toSerializedString(successResponse, responseType); + } catch (final CloudRuntimeException ex) { + ApiServlet.invalidateHttpSession(session, "fall through to API key,"); + String msg = String.format("%s", ex.getMessage() != null ? 
+ ex.getMessage() : + "failed to reset password for user, check your inputs"); + auditTrailSb.append(" " + ApiErrorCode.ACCOUNT_ERROR + " " + msg); + serializedResponse = _apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), msg, params, responseType); + if (logger.isTraceEnabled()) { + logger.trace(msg); + } + } + } + // We should not reach here and if we do we throw an exception + throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, serializedResponse); + } + + @Nullable + private String getDomainName(StringBuilder auditTrailSb, String[] domainName, String domain) { + if (domainName != null) { + domain = domainName[0]; + auditTrailSb.append(" domain=" + domain); + if (domain != null) { + // ensure domain starts with '/' and ends with '/' + if (!domain.endsWith("/")) { + domain += '/'; + } + if (!domain.startsWith("/")) { + domain = "/" + domain; + } + } + } + return domain; + } + + @Override + public APIAuthenticationType getAPIType() { + return APIAuthenticationType.PASSWORD_RESET; + } + + @Override + public void setAuthenticators(List authenticators) { + } +} diff --git a/server/src/main/java/com/cloud/api/dispatch/ParamProcessWorker.java b/server/src/main/java/com/cloud/api/dispatch/ParamProcessWorker.java index bdba8dcace2..314b83acdb5 100644 --- a/server/src/main/java/com/cloud/api/dispatch/ParamProcessWorker.java +++ b/server/src/main/java/com/cloud/api/dispatch/ParamProcessWorker.java @@ -60,6 +60,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.UuidUtils; import com.cloud.utils.db.EntityManager; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.NetUtils; public class ParamProcessWorker implements DispatchWorker { @@ -117,8 +118,21 @@ public class ParamProcessWorker implements DispatchWorker { } } + private void validateNameForRFCCompliance(final Object param, final String argName) { + String value = String.valueOf(param); + if (StringUtils.isBlank(value) || 
!NetUtils.verifyDomainNameLabel(value, true)) { + String msg = "it can contain ASCII letters 'a' through 'z', the digits '0' through '9', " + + "and the hyphen ('-'), must be between 1 and 63 characters long, and can't start or end with \"-\" and can't start with digit"; + throwInvalidParameterValueException(argName, msg); + } + } + protected void throwInvalidParameterValueException(String argName) { - throw new InvalidParameterValueException(String.format("Invalid value provided for API arg: %s", argName)); + throwInvalidParameterValueException(argName, null); + } + + protected void throwInvalidParameterValueException(String argName, String customMsg) { + throw new InvalidParameterValueException(String.format("Invalid value provided for API arg: %s%s", argName, StringUtils.isBlank(customMsg)? "" : " - " + customMsg)); } private void validateField(final Object paramObj, final Parameter annotation) throws ServerApiException { @@ -155,6 +169,12 @@ public class ParamProcessWorker implements DispatchWorker { break; } break; + case RFCComplianceDomainName: + switch (annotation.type()) { + case STRING: + validateNameForRFCCompliance(paramObj, argName); + break; + } } } } @@ -165,14 +185,18 @@ public class ParamProcessWorker implements DispatchWorker { final List cmdFields = cmd.getParamFields(); + String commandName = cmd.getCommandName(); + if (commandName.endsWith(BaseCmd.RESPONSE_SUFFIX)) { + commandName = cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8); + } + for (final Field field : cmdFields) { final Parameter parameterAnnotation = field.getAnnotation(Parameter.class); final Object paramObj = params.get(parameterAnnotation.name()); if (paramObj == null) { if (parameterAnnotation.required()) { throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to execute API command " + - cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8) + - " due to missing parameter " + parameterAnnotation.name()); + commandName + " due to 
missing parameter " + parameterAnnotation.name()); } continue; } @@ -186,29 +210,28 @@ public class ParamProcessWorker implements DispatchWorker { setFieldValue(field, cmd, paramObj, parameterAnnotation); } catch (final IllegalArgumentException argEx) { if (logger.isDebugEnabled()) { - logger.debug("Unable to execute API command " + cmd.getCommandName() + " due to invalid value " + paramObj + " for parameter " + + logger.debug("Unable to execute API command " + commandName + " due to invalid value " + paramObj + " for parameter " + parameterAnnotation.name()); } throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to execute API command " + - cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8) + " due to invalid value " + paramObj + " for parameter " + + commandName + " due to invalid value " + paramObj + " for parameter " + parameterAnnotation.name()); } catch (final ParseException parseEx) { if (logger.isDebugEnabled()) { - logger.debug("Invalid date parameter " + paramObj + " passed to command " + cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8)); + logger.debug("Invalid date parameter " + paramObj + " passed to command " + commandName); } throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to parse date " + paramObj + " for command " + - cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8) + ", please pass dates in the format mentioned in the api documentation"); + commandName + ", please pass dates in the format mentioned in the api documentation"); } catch (final InvalidParameterValueException invEx) { throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to execute API command " + - cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8) + " due to invalid value. " + invEx.getMessage()); + commandName + " due to invalid value. 
" + invEx.getMessage()); } catch (final CloudRuntimeException cloudEx) { logger.error("CloudRuntimeException", cloudEx); // FIXME: Better error message? This only happens if the API command is not executable, which typically //means // there was // and IllegalAccessException setting one of the parameters. - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Internal error executing API command " + - cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8)); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Internal error executing API command " + commandName); } //check access on the resource this field points to diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index c3ff3bdaacc..9ad42037686 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -36,6 +36,7 @@ import java.util.stream.Stream; import javax.inject.Inject; +import com.cloud.cpu.CPU; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker; @@ -71,6 +72,7 @@ import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.ListSecondaryStorageSelectorsCmd; import org.apache.cloudstack.api.command.admin.template.ListTemplatesCmdByAdmin; import org.apache.cloudstack.api.command.admin.user.ListUsersCmd; +import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd; import org.apache.cloudstack.api.command.admin.zone.ListZonesCmdByAdmin; import org.apache.cloudstack.api.command.user.account.ListAccountsCmd; import org.apache.cloudstack.api.command.user.account.ListProjectAccountsCmd; @@ -128,6 +130,7 @@ import org.apache.cloudstack.api.response.StorageTagResponse; import 
org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.api.response.VirtualMachineResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.backup.BackupOfferingVO; @@ -213,8 +216,10 @@ import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.api.query.vo.VolumeJoinVO; import com.cloud.cluster.ManagementServerHostVO; import com.cloud.cluster.dao.ManagementServerHostDao; +import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; @@ -594,6 +599,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Inject private StoragePoolHostDao storagePoolHostDao; + @Inject + private ClusterDao clusterDao; + + private SearchCriteria getMinimumCpuServiceOfferingJoinSearchCriteria(int cpu) { SearchCriteria sc = _srvOfferingJoinDao.createSearchCriteria(); SearchCriteria sc1 = _srvOfferingJoinDao.createSearchCriteria(); @@ -809,8 +818,14 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q Integer count = eventIdPage.second(); Long[] idArray = eventIdPage.first().toArray(new Long[0]); - if (count == 0) { - return new Pair<>(new ArrayList<>(), count); + /** + * Need to check array empty, because {@link com.cloud.utils.db.GenericDaoBase#searchAndCount(SearchCriteria, Filter, boolean)} + * makes two calls: first to get objects and second to get count. + * List events has start date filter, there is highly possible cause where no objects loaded + * and next millisecond new event added and finally we ended up with count = 1 and no ids. 
+ */ + if (count == 0 || idArray.length < 1) { + count = 0; } List events = _eventJoinDao.searchByIds(idArray); @@ -1148,6 +1163,58 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q return response; } + @Override + public ListResponse listAffectedVmsForStorageScopeChange(ListAffectedVmsForStorageScopeChangeCmd cmd) { + Long poolId = cmd.getStorageId(); + StoragePoolVO pool = storagePoolDao.findById(poolId); + if (pool == null) { + throw new IllegalArgumentException("Unable to find storage pool with ID: " + poolId); + } + + ListResponse response = new ListResponse<>(); + List responsesList = new ArrayList<>(); + if (pool.getScope() != ScopeType.ZONE) { + response.setResponses(responsesList, 0); + return response; + } + + Pair, Integer> vms = _vmInstanceDao.listByVmsNotInClusterUsingPool(cmd.getClusterIdForScopeChange(), poolId); + for (VMInstanceVO vm : vms.first()) { + VirtualMachineResponse resp = new VirtualMachineResponse(); + resp.setObjectName(VirtualMachine.class.getSimpleName().toLowerCase()); + resp.setId(vm.getUuid()); + resp.setVmType(vm.getType().toString()); + + UserVmJoinVO userVM = null; + if (!vm.getType().isUsedBySystem()) { + userVM = _userVmJoinDao.findById(vm.getId()); + } + if (userVM != null) { + if (userVM.getDisplayName() != null) { + resp.setVmName(userVM.getDisplayName()); + } else { + resp.setVmName(userVM.getName()); + } + } else { + resp.setVmName(vm.getInstanceName()); + } + + HostVO host = hostDao.findById(vm.getHostId()); + if (host != null) { + resp.setHostId(host.getUuid()); + resp.setHostName(host.getName()); + ClusterVO cluster = clusterDao.findById(host.getClusterId()); + if (cluster != null) { + resp.setClusterId(cluster.getUuid()); + resp.setClusterName(cluster.getName()); + } + } + responsesList.add(resp); + } + response.setResponses(responsesList, vms.second()); + return response; + } + private Object getObjectPossibleMethodValue(Object obj, String methodName) { Object result = null; @@ 
-2397,7 +2464,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q private Pair, Integer> searchForVolumeIdsAndCount(ListVolumesCmd cmd) { Account caller = CallContext.current().getCallingAccount(); - List permittedAccounts = new ArrayList(); + List permittedAccounts = new ArrayList<>(); Long id = cmd.getId(); Long vmInstanceId = cmd.getVirtualMachineId(); @@ -2407,7 +2474,8 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q Map tags = cmd.getTags(); String storageId = cmd.getStorageId(); Long clusterId = cmd.getClusterId(); - Long diskOffId = cmd.getDiskOfferingId(); + Long serviceOfferingId = cmd.getServiceOfferingId(); + Long diskOfferingId = cmd.getDiskOfferingId(); Boolean display = cmd.getDisplay(); String state = cmd.getState(); boolean shouldListSystemVms = shouldListSystemVms(cmd, caller.getId()); @@ -2417,7 +2485,14 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q List ids = getIdsListFromCmd(cmd.getId(), cmd.getIds()); - Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), null); + if (diskOfferingId == null && serviceOfferingId != null) { + ServiceOfferingVO serviceOffering = _srvOfferingDao.findById(serviceOfferingId); + if (serviceOffering != null) { + diskOfferingId = serviceOffering.getDiskOfferingId(); + } + } + + Ternary domainIdRecursiveListProject = new Ternary<>(cmd.getDomainId(), cmd.isRecursive(), null); accountMgr.buildACLSearchParameters(caller, id, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); Long domainId = domainIdRecursiveListProject.first(); Boolean isRecursive = domainIdRecursiveListProject.second(); @@ -2437,6 +2512,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q volumeSearchBuilder.and("uuid", volumeSearchBuilder.entity().getUuid(), SearchCriteria.Op.NNULL); 
volumeSearchBuilder.and("instanceId", volumeSearchBuilder.entity().getInstanceId(), SearchCriteria.Op.EQ); volumeSearchBuilder.and("dataCenterId", volumeSearchBuilder.entity().getDataCenterId(), SearchCriteria.Op.EQ); + if (cmd.isEncrypted() != null) { + if (cmd.isEncrypted()) { + volumeSearchBuilder.and("encryptFormat", volumeSearchBuilder.entity().getEncryptFormat(), SearchCriteria.Op.NNULL); + } else { + volumeSearchBuilder.and("encryptFormat", volumeSearchBuilder.entity().getEncryptFormat(), SearchCriteria.Op.NULL); + } + } if (keyword != null) { volumeSearchBuilder.and().op("keywordName", volumeSearchBuilder.entity().getName(), SearchCriteria.Op.LIKE); @@ -2540,8 +2622,8 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q } } - if (diskOffId != null) { - sc.setParameters("diskOfferingId", diskOffId); + if (diskOfferingId != null) { + sc.setParameters("diskOfferingId", diskOfferingId); } if (id != null) { @@ -2956,7 +3038,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q public ListResponse searchForStoragePools(ListStoragePoolsCmd cmd) { Pair, Integer> result = (ScopeType.HOST.name().equalsIgnoreCase(cmd.getScope()) && cmd.getHostId() != null) ? 
searchForLocalStorages(cmd) : searchForStoragePoolsInternal(cmd); - return createStoragesPoolResponse(result); + return createStoragesPoolResponse(result, cmd.getCustomStats()); } private Pair, Integer> searchForLocalStorages(ListStoragePoolsCmd cmd) { @@ -2974,10 +3056,20 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q return new Pair<>(pools, pools.size()); } - private ListResponse createStoragesPoolResponse(Pair, Integer> storagePools) { + private void setPoolResponseNFSMountOptions(StoragePoolResponse poolResponse, Long poolId) { + if (Storage.StoragePoolType.NetworkFilesystem.toString().equals(poolResponse.getType()) && + HypervisorType.KVM.toString().equals(poolResponse.getHypervisor())) { + StoragePoolDetailVO detail = _storagePoolDetailsDao.findDetail(poolId, ApiConstants.NFS_MOUNT_OPTIONS); + if (detail != null) { + poolResponse.setNfsMountOpts(detail.getValue()); + } + } + } + + private ListResponse createStoragesPoolResponse(Pair, Integer> storagePools, boolean getCustomStats) { ListResponse response = new ListResponse<>(); - List poolResponses = ViewResponseHelper.createStoragePoolResponse(storagePools.first().toArray(new StoragePoolJoinVO[storagePools.first().size()])); + List poolResponses = ViewResponseHelper.createStoragePoolResponse(getCustomStats, storagePools.first().toArray(new StoragePoolJoinVO[storagePools.first().size()])); Map poolUuidToIdMap = storagePools.first().stream().collect(Collectors.toMap(StoragePoolJoinVO::getUuid, StoragePoolJoinVO::getId, (a, b) -> a)); for (StoragePoolResponse poolResponse : poolResponses) { DataStore store = dataStoreManager.getPrimaryDataStore(poolResponse.getId()); @@ -2995,6 +3087,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q poolResponse.setCaps(caps); } } + setPoolResponseNFSMountOptions(poolResponse, poolUuidToIdMap.get(poolResponse.getId())); } response.setResponses(poolResponses, storagePools.second()); @@ -3112,7 +3205,7 @@ 
public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Override public ListResponse searchForImageStores(ListImageStoresCmd cmd) { Pair, Integer> result = searchForImageStoresInternal(cmd); - ListResponse response = new ListResponse(); + ListResponse response = new ListResponse<>(); List poolResponses = ViewResponseHelper.createImageStoreResponse(result.first().toArray(new ImageStoreJoinVO[result.first().size()])); response.setResponses(poolResponses, result.second()); @@ -3565,7 +3658,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q public ListResponse searchForServiceOfferings(ListServiceOfferingsCmd cmd) { Pair, Integer> result = searchForServiceOfferingsInternal(cmd); result.first(); - ListResponse response = new ListResponse(); + ListResponse response = new ListResponse<>(); List offeringResponses = ViewResponseHelper.createServiceOfferingResponse(result.first().toArray(new ServiceOfferingJoinVO[result.first().size()])); response.setResponses(offeringResponses, result.second()); return response; @@ -4391,7 +4484,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q null, cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), cmd.getStoragePoolId(), cmd.getImageStoreId(), hypervisorType, showDomr, cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, tags, showRemovedTmpl, cmd.getIds(), parentTemplateId, cmd.getShowUnique(), - templateType, isVnf, forCks); + templateType, isVnf, cmd.getArch(), forCks); } private Pair, Integer> searchForTemplatesInternal(Long templateId, String name, String keyword, @@ -4400,7 +4493,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q boolean showDomr, boolean onlyReady, List permittedAccounts, Account caller, ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags, boolean showRemovedTmpl, List ids, Long parentTemplateId, Boolean showUnique, String 
templateType, - Boolean isVnf, Boolean forCks) { + Boolean isVnf, CPU.CPUArch arch, Boolean forCks) { // check if zone is configured, if not, just return empty list List hypers = null; @@ -4438,6 +4531,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sc.addAnd("dataStoreId", SearchCriteria.Op.EQ, imageStoreId); } + if (arch != null) { + sc.addAnd("arch", SearchCriteria.Op.EQ, arch); + } + if (storagePoolId != null) { sc.setJoinParameters("storagePool", "pool_id", storagePoolId); } @@ -4669,8 +4766,12 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q // other criteria if (keyword != null) { - sc.addAnd("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); - } else if (name != null) { + SearchCriteria scc = _templateJoinDao.createSearchCriteria(); + scc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + scc.addOr("displayText", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + sc.addAnd("name", SearchCriteria.Op.SC, scc); + } + if (name != null) { sc.addAnd("name", SearchCriteria.Op.EQ, name); } @@ -4804,7 +4905,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q boolean showRemovedISO = cmd.getShowRemoved(); Account caller = CallContext.current().getCallingAccount(); - boolean listAll = cmd.listAll(); + boolean listAll = false; if (isoFilter != null && isoFilter == TemplateFilter.all) { if (caller.getType() == Account.Type.NORMAL) { throw new InvalidParameterValueException("Filter " + TemplateFilter.all + " can be specified by admin only"); @@ -4827,7 +4928,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q return searchForTemplatesInternal(cmd.getId(), cmd.getIsoName(), cmd.getKeyword(), isoFilter, true, cmd.isBootable(), cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), cmd.getStoragePoolId(), cmd.getImageStoreId(), hypervisorType, true, cmd.listInReadyState(), permittedAccounts, caller, 
listProjectResourcesCriteria, - tags, showRemovedISO, null, null, cmd.getShowUnique(), null, null, null); + tags, showRemovedISO, null, null, cmd.getShowUnique(), null, null, cmd.getArch(), null); } @Override @@ -5723,6 +5824,6 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] {AllowUserViewDestroyedVM, UserVMDeniedDetails, UserVMReadOnlyDetails, SortKeyAscending, - AllowUserViewAllDomainAccounts, SharePublicTemplatesWithOtherDomains}; + AllowUserViewAllDomainAccounts, SharePublicTemplatesWithOtherDomains, ReturnVmStatsOnVmList}; } } diff --git a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java index 0c70839765b..db650bf7c3e 100644 --- a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java @@ -167,6 +167,7 @@ public class ViewResponseHelper { // update nics, securitygroups, tags, affinitygroups for 1 to many mapping fields userVmData = ApiDBUtils.fillVmDetails(view, userVmData, userVm); } + userVmData.setIpAddress(userVmData.getNics()); vmDataList.put(userVm.getId(), userVmData); } return new ArrayList(vmDataList.values()); @@ -312,14 +313,14 @@ public class ViewResponseHelper { return new ArrayList(vrDataList.values()); } - public static List createStoragePoolResponse(StoragePoolJoinVO... pools) { + public static List createStoragePoolResponse(boolean customStats, StoragePoolJoinVO... 
pools) { LinkedHashMap vrDataList = new LinkedHashMap<>(); // Initialise the vrdatalist with the input data for (StoragePoolJoinVO vr : pools) { StoragePoolResponse vrData = vrDataList.get(vr.getId()); if (vrData == null) { // first time encountering this vm - vrData = ApiDBUtils.newStoragePoolResponse(vr); + vrData = ApiDBUtils.newStoragePoolResponse(vr, customStats); } else { // update tags vrData = ApiDBUtils.fillStoragePoolDetails(vrData, vr); diff --git a/server/src/main/java/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java index 2a876ea8226..a5fd2bf11f1 100644 --- a/server/src/main/java/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java @@ -21,21 +21,46 @@ import java.util.List; import javax.inject.Inject; - import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.dedicated.DedicatedResourceResponse; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.AffinityGroupJoinVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.DedicatedResources; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DedicatedResourceDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.host.Host; +import com.cloud.host.dao.HostDao; +import com.cloud.org.Cluster; +import com.cloud.user.AccountManager; public class AffinityGroupJoinDaoImpl extends GenericDaoBase implements AffinityGroupJoinDao { @Inject private ConfigurationDao _configDao; + @Inject 
+ private DedicatedResourceDao dedicatedResourceDao; + @Inject + private DataCenterDao dataCenterDao; + @Inject + private HostPodDao podDao; + @Inject + private ClusterDao clusterDao; + @Inject + private HostDao hostDao; + @Inject + private AccountManager accountManager; private final SearchBuilder agSearch; @@ -64,6 +89,14 @@ public class AffinityGroupJoinDaoImpl extends GenericDaoBase dedicatedResources = dedicatedResourceDao.listByAffinityGroupId(vag.getId()); + this.populateDedicatedResourcesField(dedicatedResources, agResponse); + } + // update vm information long instanceId = vag.getVmId(); if (instanceId > 0) { @@ -76,6 +109,32 @@ public class AffinityGroupJoinDaoImpl extends GenericDaoBase dedicatedResources, AffinityGroupResponse agResponse) { + if (dedicatedResources.isEmpty()) { + return; + } + + for (DedicatedResourceVO resource : dedicatedResources) { + DedicatedResourceResponse dedicatedResourceResponse = null; + + if (resource.getDataCenterId() != null) { + DataCenter dataCenter = dataCenterDao.findById(resource.getDataCenterId()); + dedicatedResourceResponse = new DedicatedResourceResponse(dataCenter.getUuid(), dataCenter.getName(), DedicatedResources.Type.Zone); + } else if (resource.getPodId() != null) { + HostPodVO pod = podDao.findById(resource.getPodId()); + dedicatedResourceResponse = new DedicatedResourceResponse(pod.getUuid(), pod.getName(), DedicatedResources.Type.Pod); + } else if (resource.getClusterId() != null) { + Cluster cluster = clusterDao.findById(resource.getClusterId()); + dedicatedResourceResponse = new DedicatedResourceResponse(cluster.getUuid(), cluster.getName(), DedicatedResources.Type.Cluster); + } else if (resource.getHostId() != null) { + Host host = hostDao.findById(resource.getHostId()); + dedicatedResourceResponse = new DedicatedResourceResponse(host.getUuid(), host.getName(), DedicatedResources.Type.Host); + } + + agResponse.addDedicatedResource(dedicatedResourceResponse); + } + } + @Override public 
AffinityGroupResponse setAffinityGroupResponse(AffinityGroupResponse vagData, AffinityGroupJoinVO vag) { // update vm information diff --git a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java index 2bfbb3b9d67..2b3be728bd3 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java @@ -18,9 +18,13 @@ package com.cloud.api.query.dao; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; import javax.inject.Inject; +import com.cloud.cpu.CPU; +import com.cloud.dc.ASNumberRangeVO; +import com.cloud.dc.dao.ASNumberRangeDao; import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.element.NsxProviderVO; import org.apache.cloudstack.annotation.AnnotationService; @@ -30,6 +34,7 @@ import org.apache.cloudstack.api.response.ResourceIconResponse; import org.apache.cloudstack.api.response.ResourceTagResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.ObjectUtils; import org.springframework.stereotype.Component; @@ -56,6 +61,8 @@ public class DataCenterJoinDaoImpl extends GenericDaoBase clusterArchs = ApiDBUtils.listZoneClustersArchs(dataCenter.getId()); + zoneResponse.setMultiArch(CollectionUtils.isNotEmpty(clusterArchs) && clusterArchs.size() > 1); + + List asNumberRange = asNumberRangeDao.listByZoneId(dataCenter.getId()); + String asRange = asNumberRange.stream().map(range -> range.getStartASNumber() + "-" + range.getEndASNumber()).collect(Collectors.joining(", ")); + zoneResponse.setAsnRange(asRange); + zoneResponse.setResourceDetails(ApiDBUtils.getResourceDetails(dataCenter.getId(), ResourceObjectType.Zone)); 
zoneResponse.setHasAnnotation(annotationDao.hasAnnotations(dataCenter.getUuid(), AnnotationService.EntityType.ZONE.name(), _accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId()))); diff --git a/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java index 8076755bb71..e53f94fa0c3 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java @@ -38,11 +38,13 @@ import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; import com.cloud.server.ResourceTag; import com.cloud.user.AccountManager; -import com.cloud.utils.db.Attribute; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import static org.apache.cloudstack.query.QueryService.SortKeyAscending; + @Component public class DiskOfferingJoinDaoImpl extends GenericDaoBase implements DiskOfferingJoinDao { @@ -57,7 +59,6 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase dofIdSearch; private SearchBuilder diskOfferingSearch; - private final Attribute _typeAttr; protected DiskOfferingJoinDaoImpl() { @@ -69,9 +70,6 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase searchByIds(Long... 
offeringIds) { + Filter searchFilter = new Filter(DiskOfferingJoinVO.class, "sortKey", SortKeyAscending.value()); + searchFilter.addOrderBy(DiskOfferingJoinVO.class, "id", true); // set detail batch query size int DETAILS_BATCH_SIZE = 2000; String batchCfg = configDao.getValue("detail.batch.query.size"); @@ -184,7 +184,7 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase sc = diskOfferingSearch.create(); sc.setParameters("idIN", ids); - List accounts = searchIncludingRemoved(sc, null, null, false); + List accounts = searchIncludingRemoved(sc, searchFilter, null, false); if (accounts != null) { uvList.addAll(accounts); } @@ -200,7 +200,7 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase sc = diskOfferingSearch.create(); sc.setParameters("idIN", ids); - List accounts = searchIncludingRemoved(sc, null, null, false); + List accounts = searchIncludingRemoved(sc, searchFilter, null, false); if (accounts != null) { uvList.addAll(accounts); } diff --git a/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java index c6041c3e373..2ef019a5380 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java @@ -208,6 +208,7 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase implements hostResponse.setImplicitHostTags(host.getImplicitTag()); hostResponse.setHypervisorVersion(host.getHypervisorVersion()); + if (host.getArch() != null) { + hostResponse.setArch(host.getArch().getType()); + } float cpuWithOverprovisioning = host.getCpus() * host.getSpeed() * cpuOverprovisioningFactor; hostResponse.setCpuAllocatedValue(cpu); @@ -238,16 +241,16 @@ public class HostJoinDaoImpl extends GenericDaoBase implements Map hostDetails = hostDetailsDao.findDetails(host.getId()); if (hostDetails != null) { if (hostDetails.containsKey(Host.HOST_UEFI_ENABLE)) { - 
hostResponse.setUefiCapabilty(Boolean.parseBoolean((String) hostDetails.get(Host.HOST_UEFI_ENABLE))); + hostResponse.setUefiCapability(Boolean.parseBoolean((String) hostDetails.get(Host.HOST_UEFI_ENABLE))); } else { - hostResponse.setUefiCapabilty(new Boolean(false)); + hostResponse.setUefiCapability(new Boolean(false)); } } if (details.contains(HostDetails.all) && (host.getHypervisorType() == Hypervisor.HypervisorType.KVM || host.getHypervisorType() == Hypervisor.HypervisorType.Custom)) { //only kvm has the requirement to return host details try { - hostResponse.setDetails(hostDetails); + hostResponse.setDetails(hostDetails, host.getHypervisorType()); } catch (Exception e) { logger.debug("failed to get host details", e); } diff --git a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java index 3d50b88846c..22b51f6fd8c 100644 --- a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java @@ -124,6 +124,12 @@ public class NetworkOfferingJoinDaoImpl extends GenericDaoBase> listDomainsOfServiceOfferingsUsedByDomainPath(String domainPath); + List searchByIds(Long... 
id); } diff --git a/server/src/main/java/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java index bf6167e7f3e..d3c7a7decde 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java @@ -19,35 +19,38 @@ package com.cloud.api.query.dao; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; -import java.util.ArrayList; import java.util.List; import java.util.Map; -import com.cloud.dc.VsphereStoragePolicyVO; -import com.cloud.dc.dao.VsphereStoragePolicyDao; -import com.cloud.user.AccountManager; -import com.cloud.utils.db.TransactionLegacy; +import javax.inject.Inject; + import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; -import com.cloud.storage.DiskOfferingVO; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; - +import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.vo.ServiceOfferingJoinVO; +import com.cloud.dc.VsphereStoragePolicyVO; +import com.cloud.dc.dao.VsphereStoragePolicyDao; import com.cloud.offering.ServiceOffering; import com.cloud.server.ResourceTag.ResourceObjectType; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.user.AccountManager; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionLegacy; -import javax.inject.Inject; +import static 
org.apache.cloudstack.query.QueryService.SortKeyAscending; @Component public class ServiceOfferingJoinDaoImpl extends GenericDaoBase implements ServiceOfferingJoinDao { @@ -167,6 +170,10 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase searchByIds(Long... offeringIds) { + Filter searchFilter = new Filter(ServiceOfferingJoinVO.class, "sortKey", SortKeyAscending.value()); + searchFilter.addOrderBy(ServiceOfferingJoinVO.class, "id", true); + // set detail batch query size int DETAILS_BATCH_SIZE = 2000; String batchCfg = configDao.getValue("detail.batch.query.size"); @@ -247,9 +257,9 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase sc = srvOfferingSearch.create(); sc.setParameters("idIN", ids); - List accounts = searchIncludingRemoved(sc, null, null, false); - if (accounts != null) { - uvList.addAll(accounts); + List offerings = searchIncludingRemoved(sc, searchFilter, null, false); + if (offerings != null) { + uvList.addAll(offerings); } curr_index += DETAILS_BATCH_SIZE; } @@ -263,9 +273,9 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase sc = srvOfferingSearch.create(); sc.setParameters("idIN", ids); - List accounts = searchIncludingRemoved(sc, null, null, false); - if (accounts != null) { - uvList.addAll(accounts); + List offerings = searchIncludingRemoved(sc, searchFilter, null, false); + if (offerings != null) { + uvList.addAll(offerings); } } return uvList; diff --git a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java index 0810540a377..42a35f4412c 100644 --- a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java @@ -34,16 +34,21 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.query.QueryService; +import 
com.cloud.api.ApiDBUtils; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.SnapshotJoinVO; +import com.cloud.storage.GuestOS; import com.cloud.storage.Snapshot; import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeVO; import com.cloud.user.Account; import com.cloud.user.AccountService; import com.cloud.utils.Pair; import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.VMInstanceVO; public class SnapshotJoinDaoImpl extends GenericDaoBaseWithTagInformation implements SnapshotJoinDao { @@ -120,7 +125,18 @@ public class SnapshotJoinDaoImpl extends GenericDaoBaseWithTagInformation { - StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO host); + StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO host, boolean customStats); StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, StoragePoolJoinVO host); diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index 14de5ffe71d..c401cabe233 100644 --- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -50,6 +50,9 @@ import com.cloud.utils.StringUtils; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.apache.commons.collections.MapUtils; + +import java.util.Map; @Component public class StoragePoolJoinDaoImpl extends GenericDaoBase implements StoragePoolJoinDao { @@ -100,7 +103,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase storageCustomStats = driver.getCustomStorageStats(storagePool); + if (MapUtils.isNotEmpty(storageCustomStats)) { + poolResponse.setCustomStats(storageCustomStats); + } + } } // TODO: 
StatsCollector does not persist data @@ -162,6 +172,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase childTemplatesSet = new HashSet(); @@ -414,6 +418,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation 0) { DiskOffering computeOnlyDiskOffering = ApiDBUtils.findComputeOnlyDiskOfferingById(volume.getDiskOfferingId()); - if (computeOnlyDiskOffering != null) { - ServiceOffering serviceOffering = ApiDBUtils.findServiceOfferingByComputeOnlyDiskOffering(volume.getDiskOfferingId()); + ServiceOffering serviceOffering = getServiceOfferingForDiskOffering(volume, computeOnlyDiskOffering); + if (serviceOffering != null) { volResponse.setServiceOfferingId(String.valueOf(serviceOffering.getId())); volResponse.setServiceOfferingName(serviceOffering.getName()); volResponse.setServiceOfferingDisplayText(serviceOffering.getDisplayText()); @@ -276,9 +283,31 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation getEntityType() { diff --git a/server/src/main/java/com/cloud/api/query/vo/VolumeJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/VolumeJoinVO.java index 8a9804c8e96..2ae720fa852 100644 --- a/server/src/main/java/com/cloud/api/query/vo/VolumeJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/VolumeJoinVO.java @@ -277,6 +277,12 @@ public class VolumeJoinVO extends BaseViewWithTagInformationVO implements Contro @Column(name = "external_uuid") private String externalUuid = null; + @Column(name = "encrypt_format") + private String encryptionFormat = null; + + @Column(name = "delete_protection") + protected Boolean deleteProtection; + public VolumeJoinVO() { } @@ -612,9 +618,16 @@ public class VolumeJoinVO extends BaseViewWithTagInformationVO implements Contro this.externalUuid = externalUuid; } + public String getEncryptionFormat() { + return encryptionFormat; + } + + public Boolean getDeleteProtection() { + return deleteProtection; + } + @Override public Class getEntityType() { return Volume.class; 
} - } diff --git a/server/src/main/java/com/cloud/api/query/vo/VpcOfferingJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/VpcOfferingJoinVO.java index 215c94dea25..d72f5b61907 100644 --- a/server/src/main/java/com/cloud/api/query/vo/VpcOfferingJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/VpcOfferingJoinVO.java @@ -27,6 +27,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.network.vpc.VpcOffering; +import com.cloud.offering.NetworkOffering; import com.cloud.utils.db.GenericDao; @Entity @@ -80,8 +81,8 @@ public class VpcOfferingJoinVO implements VpcOffering { @Column(name = "for_nsx") boolean forNsx = false; - @Column(name = "nsx_mode") - String nsxMode; + @Column(name = "network_mode") + NetworkOffering.NetworkMode networkMode; @Column(name = "domain_id") private String domainId; @@ -107,6 +108,13 @@ public class VpcOfferingJoinVO implements VpcOffering { @Column(name = "internet_protocol") private String internetProtocol = null; + @Column(name="routing_mode") + @Enumerated(value = EnumType.STRING) + private NetworkOffering.RoutingMode routingMode; + + @Column(name = "specify_as_number") + private Boolean specifyAsNumber = false; + public VpcOfferingJoinVO() { } @@ -150,8 +158,8 @@ public class VpcOfferingJoinVO implements VpcOffering { } @Override - public String getNsxMode() { - return nsxMode; + public NetworkOffering.NetworkMode getNetworkMode() { + return networkMode; } @Override @@ -164,6 +172,24 @@ public class VpcOfferingJoinVO implements VpcOffering { return created; } + @Override + public NetworkOffering.RoutingMode getRoutingMode() { + return routingMode; + } + + public void setRoutingMode(NetworkOffering.RoutingMode routingMode) { + this.routingMode = routingMode; + } + + @Override + public Boolean isSpecifyAsNumber() { + return specifyAsNumber; + } + + public void setSpecifyAsNumber(Boolean specifyAsNumber) { + this.specifyAsNumber = specifyAsNumber; + } + @Override public Long 
getServiceOfferingId() { return serviceOfferingId; diff --git a/server/src/main/java/com/cloud/bgp/BGPServiceImpl.java b/server/src/main/java/com/cloud/bgp/BGPServiceImpl.java new file mode 100644 index 00000000000..0e6ae7ade1a --- /dev/null +++ b/server/src/main/java/com/cloud/bgp/BGPServiceImpl.java @@ -0,0 +1,438 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.bgp; + +import com.cloud.dc.ASNumberRangeVO; +import com.cloud.dc.ASNumberVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ASNumberDao; +import com.cloud.dc.dao.ASNumberRangeDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.domain.Domain; +import com.cloud.domain.dao.DomainDao; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.network.NetworkModel; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkServiceMapDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.network.element.BgpServiceProvider; +import com.cloud.network.element.NetworkElement; +import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.VpcOfferingVO; +import com.cloud.network.vpc.VpcVO; +import com.cloud.network.vpc.dao.VpcDao; +import com.cloud.network.vpc.dao.VpcOfferingDao; +import com.cloud.network.vpc.dao.VpcServiceMapDao; +import com.cloud.offering.NetworkOffering; +import com.cloud.offerings.NetworkOfferingVO; +import com.cloud.offerings.dao.NetworkOfferingDao; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; +import com.cloud.user.Account; +import com.cloud.user.dao.AccountDao; +import com.cloud.utils.Pair; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.api.command.user.bgp.ListASNumbersCmd; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.network.BgpPeerVO; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.apache.cloudstack.network.dao.BgpPeerDao; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; + +import javax.inject.Inject; +import java.security.InvalidParameterException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class BGPServiceImpl implements BGPService { + + public static final Logger LOGGER = LogManager.getLogger(BGPServiceImpl.class); + + @Inject + private DataCenterDao dataCenterDao; + @Inject + private ASNumberRangeDao asNumberRangeDao; + @Inject + private ASNumberDao asNumberDao; + @Inject + private NetworkDao networkDao; + @Inject + private VpcDao vpcDao; + @Inject + private VpcOfferingDao vpcOfferingDao; + @Inject + private NetworkOfferingDao networkOfferingDao; + @Inject + private AccountDao accountDao; + @Inject + private DomainDao domainDao; + @Inject + NetworkServiceMapDao ntwkSrvcDao; + @Inject + NetworkModel networkModel; + @Inject + BgpPeerDao bgpPeerDao; + @Inject + RoutedIpv4Manager routedIpv4Manager; + @Inject + VpcServiceMapDao vpcServiceMapDao; + + public BGPServiceImpl() { + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_AS_RANGE_CREATE, eventDescription = "AS Range creation") + public ASNumberRange createASNumberRange(long zoneId, long startASNumber, long endASNumber) { + DataCenterVO zone = dataCenterDao.findById(zoneId); + if (zone == null) { + String msg = String.format("Cannot find a zone with ID %s", zoneId); + LOGGER.error(msg); + throw new InvalidParameterException(msg); + } + if (startASNumber > endASNumber) { + String msg = "Please specify a valid AS Number range"; + LOGGER.error(msg); + throw new InvalidParameterException(msg); + } + + List asNumberRanges = asNumberRangeDao.listByZoneId(zoneId); + for (ASNumberRangeVO asNumberRange : asNumberRanges) { + if (isASNumbersOverlap(asNumberRange.getStartASNumber(), asNumberRange.getEndASNumber(), startASNumber, endASNumber)) { + throw new InvalidParameterException(String.format("New AS number range (%s-%s) has conflict 
with an existing AS number range (%s-%s)", + startASNumber, endASNumber, asNumberRange.getStartASNumber(), asNumberRange.getEndASNumber())); + } + } + + try { + return Transaction.execute((TransactionCallback) status -> { + LOGGER.debug(String.format("Persisting AS Number Range %s-%s for the zone %s", startASNumber, endASNumber, zone.getName())); + ASNumberRangeVO asNumberRangeVO = new ASNumberRangeVO(zoneId, startASNumber, endASNumber); + asNumberRangeDao.persist(asNumberRangeVO); + + for (long asn = startASNumber; asn <= endASNumber; asn++) { + LOGGER.debug(String.format("Persisting AS Number %s for zone %s", asn, zone.getName())); + ASNumberVO asNumber = new ASNumberVO(asn, asNumberRangeVO.getId(), zoneId); + asNumberDao.persist(asNumber); + } + return asNumberRangeVO; + }); + } catch (Exception e) { + String err = String.format("Error creating AS Number range %s-%s for zone %s: %s", startASNumber, endASNumber, zone.getName(), e.getMessage()); + LOGGER.error(err, e); + throw new CloudRuntimeException(err); + } + } + + protected boolean isASNumbersOverlap(long startNumber1, long endNumber1, long startNumber2, long endNumber2) { + if (startNumber1 > endNumber2 || startNumber2 > endNumber1) { + return false; + } + return true; + } + + @Override + public List listASNumberRanges(Long zoneId) { + List rangeVOList = zoneId != null ? 
asNumberRangeDao.listByZoneId(zoneId) : asNumberRangeDao.listAll(); + return new ArrayList<>(rangeVOList); + } + + @Override + public Pair, Integer> listASNumbers(ListASNumbersCmd cmd) { + Long zoneId = cmd.getZoneId(); + Long asNumberRangeId = cmd.getAsNumberRangeId(); + Integer asNumber = cmd.getAsNumber(); + Boolean allocated = cmd.getAllocated(); + Long networkId = cmd.getNetworkId(); + Long vpcId = cmd.getVpcId(); + String accountName = cmd.getAccount(); + Long domainId = cmd.getDomainId(); + Long startIndex = cmd.getStartIndex(); + Long pageSizeVal = cmd.getPageSizeVal(); + String keyword = cmd.getKeyword(); + + Account userAccount = null; + Domain domain = null; + final Account caller = CallContext.current().getCallingAccount(); + if (Objects.nonNull(accountName)) { + if (domainId != null) { + userAccount = accountDao.findActiveAccount(accountName, domainId); + domain = domainDao.findById(domainId); + } else { + userAccount = accountDao.findActiveAccount(accountName, caller.getDomainId()); + domain = domainDao.findById(caller.getDomainId()); + } + } + + if (Objects.nonNull(accountName) && Objects.isNull(userAccount)) { + throw new InvalidParameterException(String.format("Failed to find user Account: %s", accountName)); + } + + Long networkSearchId = networkId; + Long vpcSerchId = vpcId; + if (networkId != null) { + NetworkVO network = networkDao.findById(networkId); + if (network == null) { + throw new InvalidParameterException(String.format("Failed to find network with ID: %s", networkId)); + } + if (network.getVpcId() != null) { + LOGGER.debug(String.format("The network %s is a VPC tier, searching for the AS number on the VPC with ID %s", + network.getName(), network.getVpcId())); + networkSearchId = null; + vpcSerchId = network.getVpcId(); + } + } + Pair, Integer> pair = asNumberDao.searchAndCountByZoneOrRangeOrAllocated(zoneId, asNumberRangeId, + asNumber, networkSearchId, vpcSerchId, allocated, Objects.nonNull(userAccount) ? 
userAccount.getId() : null, + Objects.nonNull(domain) ? domain.getId() : null, keyword, caller, startIndex, pageSizeVal); + return new Pair<>(new ArrayList<>(pair.first()), pair.second()); + } + + @Override + public boolean allocateASNumber(long zoneId, Long asNumber, Long networkId, Long vpcId) { + ASNumberVO asNumberVO = isOfferingSpecifyAsNumber(networkId, vpcId) ? + asNumberDao.findByAsNumber(asNumber) : + asNumberDao.findOneByAllocationStateAndZone(zoneId, false); + if (asNumberVO == null || asNumberVO.getDataCenterId() != zoneId) { + if (asNumber != null) { + LOGGER.error(String.format("Cannot find AS number %s in zone with ID %s", asNumber, zoneId)); + return false; + } + throw new CloudRuntimeException(String.format("Cannot allocate AS number in zone with ID %s", zoneId)); + } + long accountId, domainId; + String netName; + if (Objects.nonNull(vpcId)) { + VpcVO vpc = vpcDao.findById(vpcId); + if (vpc == null) { + LOGGER.error(String.format("Cannot find VPC with ID %s", vpcId)); + return false; + } + accountId = vpc.getAccountId(); + domainId = vpc.getDomainId(); + netName = vpc.getName(); + } else { + NetworkVO network = networkDao.findById(networkId); + if (network == null) { + LOGGER.error(String.format("Cannot find network with ID %s", networkId)); + return false; + } + accountId = network.getAccountId(); + domainId = network.getDomainId(); + netName = network.getName(); + } + + LOGGER.debug(String.format("Allocating the AS Number %s to %s %s on zone %s", asNumber, + (Objects.nonNull(vpcId) ? 
"VPC" : "network"), netName, zoneId)); + asNumberVO.setAllocated(true); + asNumberVO.setAllocatedTime(new Date()); + if (Objects.nonNull(vpcId)) { + asNumberVO.setVpcId(vpcId); + } else { + asNumberVO.setNetworkId(networkId); + } + asNumberVO.setAccountId(accountId); + asNumberVO.setDomainId(domainId); + return asNumberDao.update(asNumberVO.getId(), asNumberVO); + } + + private boolean isOfferingSpecifyAsNumber(Long networkId, Long vpcId) { + if (Objects.nonNull(vpcId)) { + VpcVO vpc = vpcDao.findById(vpcId); + if (vpc == null) { + throw new CloudRuntimeException(String.format("Cannot find VPC with ID %s", vpcId)); + } + VpcOfferingVO vpcOffering = vpcOfferingDao.findById(vpc.getVpcOfferingId()); + return NetworkOffering.RoutingMode.Dynamic == vpcOffering.getRoutingMode() && BooleanUtils.toBoolean(vpcOffering.isSpecifyAsNumber()); + } else { + NetworkVO network = networkDao.findById(networkId); + NetworkOfferingVO networkOffering = networkOfferingDao.findById(network.getNetworkOfferingId()); + return NetworkOffering.RoutingMode.Dynamic == networkOffering.getRoutingMode() && BooleanUtils.toBoolean(networkOffering.isSpecifyAsNumber()); + } + } + + private Pair logAndReturnErrorMessage(String msg) { + LOGGER.error(msg); + return new Pair<>(false, msg); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_AS_NUMBER_RELEASE, eventDescription = "Releasing AS Number") + public Pair releaseASNumber(long zoneId, long asNumber, boolean isDestroyNetworkOperation) { + ASNumberVO asNumberVO = asNumberDao.findByAsNumber(asNumber); + if (asNumberVO == null) { + return logAndReturnErrorMessage(String.format("Cannot find AS Number %s on zone %s", asNumber, zoneId)); + } + if (!asNumberVO.isAllocated()) { + LOGGER.debug(String.format("The AS Number %s is not allocated to any network on zone %s, ignoring release", asNumber, zoneId)); + return new Pair<>(true, ""); + } + Long networkId = asNumberVO.getNetworkId(); + Long vpcId = asNumberVO.getVpcId(); + if 
(!isDestroyNetworkOperation) { + Pair checksResult = performReleaseASNumberChecks(networkId, vpcId, asNumber); + if (checksResult != null) { + return checksResult; + } + } + LOGGER.debug(String.format("Releasing AS Number %s on zone %s from previous allocation", asNumber, zoneId)); + asNumberVO.setAllocated(false); + asNumberVO.setAllocatedTime(null); + asNumberVO.setDomainId(null); + asNumberVO.setAccountId(null); + if (vpcId != null) { + asNumberVO.setVpcId(null); + } else { + asNumberVO.setNetworkId(null); + } + boolean update = asNumberDao.update(asNumberVO.getId(), asNumberVO); + String msg = update ? "OK" : "Could not update database record for AS number"; + return new Pair<>(update, msg); + } + + private Pair performReleaseASNumberChecks(Long networkId, Long vpcId, long asNumber) { + if (networkId != null) { + NetworkVO network = networkDao.findById(networkId); + if (network == null) { + return logAndReturnErrorMessage(String.format("Cannot find a network with ID %s which acquired the AS number %s", networkId, asNumber)); + } + NetworkOfferingVO offering = networkOfferingDao.findById(network.getNetworkOfferingId()); + if (offering == null) { + return logAndReturnErrorMessage(String.format("Cannot find a network offering with ID %s", network.getNetworkOfferingId())); + } + if (offering.isSpecifyAsNumber()) { + return logAndReturnErrorMessage(String.format("Cannot release the AS number %s as it is acquired by a network that requires AS number", asNumber)); + } + } else if (vpcId != null) { + VpcVO vpc = vpcDao.findById(vpcId); + if (vpc == null) { + return logAndReturnErrorMessage(String.format("Cannot find a VPC with ID %s which acquired the AS number %s", vpcId, asNumber)); + } + VpcOfferingVO vpcOffering = vpcOfferingDao.findById(vpc.getVpcOfferingId()); + if (vpcOffering == null) { + return logAndReturnErrorMessage(String.format("Cannot find a VPC offering with ID %s", vpc.getVpcOfferingId())); + } + if (vpcOffering.isSpecifyAsNumber()) { + return 
logAndReturnErrorMessage(String.format("Cannot release the AS number %s as it is acquired by a VPC that requires AS number", asNumber)); + } + } + return null; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_AS_RANGE_DELETE, eventDescription = "Deleting AS Range") + public boolean deleteASRange(long id) { + final ASNumberRange asRange = asNumberRangeDao.findById(id); + if (asRange == null) { + throw new CloudRuntimeException(String.format("Could not find a AS range with id: %s", id)); + } + long startASNumber = asRange.getStartASNumber(); + long endASNumber = asRange.getEndASNumber(); + long zoneId = asRange.getDataCenterId(); + List allocatedAsNumbers = asNumberDao.listAllocatedByASRange(asRange.getId()); + if (Objects.nonNull(allocatedAsNumbers) && !allocatedAsNumbers.isEmpty()) { + throw new CloudRuntimeException(String.format("There are %s AS numbers in use from the range %s-%s, cannot remove the range", + allocatedAsNumbers.size(), startASNumber, endASNumber)); + } + try { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + int removedASNumbers = asNumberDao.removeASRangeNumbers(asRange.getId()); + LOGGER.debug(String.format("Removed %s AS numbers from the range %s-%s", removedASNumbers, + startASNumber, endASNumber)); + asNumberRangeDao.remove(id); + LOGGER.debug(String.format("Removing the AS Number Range %s-%s for the zone %s", startASNumber, + endASNumber, zoneId)); + } + }); + } catch (Exception e) { + String err = String.format("Error removing AS Number range %s-%s for zone %s: %s", + startASNumber, endASNumber, zoneId, e.getMessage()); + LOGGER.error(err, e); + throw new CloudRuntimeException(err); + } + return true; + } + + @Override + public boolean applyBgpPeers(Network network, boolean continueOnError) throws ResourceUnavailableException { + if (!routedIpv4Manager.isDynamicRoutedNetwork(network)) { + return true; + } + final String 
gatewayProviderStr = ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Network.Service.Gateway); + if (gatewayProviderStr != null) { + NetworkElement provider = networkModel.getElementImplementingProvider(gatewayProviderStr); + if (provider != null && provider instanceof BgpServiceProvider) { + List bgpPeers; + if (network.getVpcId() != null) { + bgpPeers = bgpPeerDao.listNonRevokeByVpcId(network.getVpcId()); + } else { + bgpPeers = bgpPeerDao.listNonRevokeByNetworkId(network.getId()); + } + if (CollectionUtils.isEmpty(bgpPeers)) { + Account owner = accountDao.findByIdIncludingRemoved(network.getAccountId()); + List bgpPeerIds = routedIpv4Manager.getBgpPeerIdsForAccount(owner, network.getDataCenterId()); + bgpPeers = bgpPeerIds.stream() + .map(bgpPeerId -> bgpPeerDao.findById(bgpPeerId)) + .collect(Collectors.toList()); + } + LOGGER.debug(String.format("Applying BPG Peers for network [%s]: [%s]", network, bgpPeers)); + return ((BgpServiceProvider) provider).applyBgpPeers(null, network, bgpPeers); + } + } + return true; + } + + @Override + public boolean applyBgpPeers(Vpc vpc, boolean continueOnError) throws ResourceUnavailableException { + if (!routedIpv4Manager.isDynamicRoutedVpc(vpc)) { + return true; + } + final String gatewayProviderStr = vpcServiceMapDao.getProviderForServiceInVpc(vpc.getId(), Network.Service.Gateway); + if (gatewayProviderStr != null) { + NetworkElement provider = networkModel.getElementImplementingProvider(gatewayProviderStr); + if (provider != null && provider instanceof BgpServiceProvider) { + List bgpPeers = bgpPeerDao.listNonRevokeByVpcId(vpc.getId()); + if (CollectionUtils.isEmpty(bgpPeers)) { + Account owner = accountDao.findByIdIncludingRemoved(vpc.getAccountId()); + List bgpPeerIds = routedIpv4Manager.getBgpPeerIdsForAccount(owner, vpc.getZoneId()); + bgpPeers = bgpPeerIds.stream() + .map(bgpPeerId -> bgpPeerDao.findById(bgpPeerId)) + .collect(Collectors.toList()); + } + LOGGER.debug(String.format("Applying BPG Peers for 
VPC [%s]: [%s]", vpc, bgpPeers)); + return ((BgpServiceProvider) provider).applyBgpPeers(vpc, null, bgpPeers); + + } + } + return true; + } +} diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index d325ae4b95c..421c980b209 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -22,6 +22,7 @@ import java.net.URI; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -709,13 +710,16 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, for (VMInstanceVO vm : vmsByLastHostId) { Float cpuOvercommitRatio = 1.0f; Float ramOvercommitRatio = 1.0f; - long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime()) / 1000; + long lastModificationTime = Optional.ofNullable(vm.getUpdateTime()).orElse(vm.getCreated()).getTime(); + long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - lastModificationTime) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { UserVmDetailVO vmDetailCpu = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO); UserVmDetailVO vmDetailRam = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); if (vmDetailCpu != null) { //if vmDetail_cpu is not null it means it is running in a overcommited cluster. 
cpuOvercommitRatio = Float.parseFloat(vmDetailCpu.getValue()); + } + if (vmDetailRam != null) { ramOvercommitRatio = Float.parseFloat(vmDetailRam.getValue()); } ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index 1fb36b65ecf..ce3ac768468 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -78,7 +78,6 @@ public enum Config { "30000", "Socket I/O timeout value in milliseconds. -1 for infinite timeout.", null), - AlertSMTPUseAuth("Alert", ManagementServer.class, String.class, "alert.smtp.useAuth", null, "If true, use SMTP authentication when sending emails.", null), AlertSMTPUsername( "Alert", ManagementServer.class, @@ -558,7 +557,7 @@ public enum Config { Boolean.class, "disable.extraction", "false", - "Flag for disabling extraction of template, isos and volumes", + "Flag for disabling extraction of templates, isos, snapshots and volumes", null), ExtractURLExpirationInterval( "Advanced", @@ -1366,6 +1365,14 @@ public enum Config { "200", "The default maximum primary storage space (in GiB) that can be used for an account", null), +DefaultMaxAccountProjects( + "Account Defaults", + ManagementServer.class, + Long.class, + "max.account.projects", + "10", + "The default maximum number of projects that can be created for an account", + null), //disabling lb as cluster sync does not work with distributed cluster SubDomainNetworkAccess( @@ -1415,6 +1422,7 @@ public enum Config { DefaultMaxDomainMemory("Domain Defaults", ManagementServer.class, Long.class, "max.domain.memory", "81920", "The default maximum memory (in MB) that can be used for a domain", null), DefaultMaxDomainPrimaryStorage("Domain Defaults", ManagementServer.class, Long.class, "max.domain.primary.storage", "400", "The default maximum primary storage space (in GiB) that can be 
used for a domain", null), DefaultMaxDomainSecondaryStorage("Domain Defaults", ManagementServer.class, Long.class, "max.domain.secondary.storage", "800", "The default maximum secondary storage space (in GiB) that can be used for a domain", null), + DefaultMaxDomainProjects("Domain Defaults",ManagementServer.class,Long.class,"max.domain.projects","50","The default maximum number of projects that can be created for a domain",null), DefaultMaxProjectUserVms( "Project Defaults", @@ -1547,14 +1555,6 @@ public enum Config { "Password for SMTP authentication (applies only if project.smtp.useAuth is true)", null), ProjectSMTPPort("Project Defaults", ManagementServer.class, Integer.class, "project.smtp.port", "465", "Port the SMTP server is listening on", null), - ProjectSMTPUseAuth( - "Project Defaults", - ManagementServer.class, - String.class, - "project.smtp.useAuth", - null, - "If true, use SMTP authentication when sending emails", - null), ProjectSMTPUsername( "Project Defaults", ManagementServer.class, diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 04cdbf3ff25..692fc925c78 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -45,17 +45,6 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; - -import com.cloud.dc.VlanDetailsVO; -import com.cloud.dc.dao.VlanDetailsDao; -import com.cloud.hypervisor.HypervisorGuru; -import com.cloud.network.dao.NsxProviderDao; -import com.cloud.network.element.NsxProviderVO; -import com.cloud.utils.crypt.DBEncryptionUtil; -import com.cloud.host.HostTagVO; -import com.cloud.storage.StoragePoolTagVO; -import com.cloud.storage.VolumeApiServiceImpl; -import com.googlecode.ipv6.IPv6Address; import org.apache.cloudstack.acl.SecurityChecker; import 
org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -136,8 +125,10 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.userdata.UserDataManager; import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.cloudstack.vm.UnmanagedVMsManager; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.EnumUtils; @@ -170,6 +161,7 @@ import com.cloud.dc.Pod; import com.cloud.dc.PodVlanMapVO; import com.cloud.dc.Vlan; import com.cloud.dc.Vlan.VlanType; +import com.cloud.dc.VlanDetailsVO; import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.ClusterDao; @@ -183,6 +175,7 @@ import com.cloud.dc.dao.DomainVlanMapDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.PodVlanMapDao; import com.cloud.dc.dao.VlanDao; +import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.dc.dao.VsphereStoragePolicyDao; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeploymentClusterPlanner; @@ -201,10 +194,12 @@ import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.gpu.GPU; +import com.cloud.host.HostTagVO; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostTagsDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorGuru; import com.cloud.hypervisor.kvm.dpdk.DpdkHelper; import com.cloud.network.IpAddress; 
import com.cloud.network.IpAddressManager; @@ -227,11 +222,13 @@ import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.Ipv6GuestPrefixSubnetNetworkMapDao; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkTrafficTypeDao; import com.cloud.network.dao.PhysicalNetworkTrafficTypeVO; import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.network.dao.UserIpv6AddressDao; +import com.cloud.network.element.NsxProviderVO; import com.cloud.network.rules.LoadBalancerContainer.Scheme; import com.cloud.network.vpc.VpcManager; import com.cloud.offering.DiskOffering; @@ -259,7 +256,9 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePoolTagVO; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeApiServiceImpl; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.StoragePoolTagsDao; @@ -279,6 +278,7 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Filter; @@ -303,8 +303,13 @@ import com.google.common.base.Enums; import com.google.common.base.MoreObjects; import com.google.common.base.Preconditions; import com.google.common.collect.Sets; +import com.googlecode.ipv6.IPv6Address; import com.googlecode.ipv6.IPv6Network; +import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites; +import static com.cloud.offering.NetworkOffering.RoutingMode.Dynamic; +import static 
com.cloud.offering.NetworkOffering.RoutingMode.Static; + public class ConfigurationManagerImpl extends ManagerBase implements ConfigurationManager, ConfigurationService, Configurable { public static final String PERACCOUNT = "peraccount"; public static final String PERZONE = "perzone"; @@ -473,9 +478,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati private long _defaultPageSize = Long.parseLong(Config.DefaultPageSize.getDefaultValue()); private static final String DOMAIN_NAME_PATTERN = "^((?!-)[A-Za-z0-9-]{1,63}(? configValuesForValidation; - private Set weightBasedParametersForValidation; - private Set overprovisioningFactorsForValidation; + private Set configValuesForValidation = new HashSet<>(); + private Set weightBasedParametersForValidation = new HashSet<>(); + private Set overprovisioningFactorsForValidation = new HashSet<>(); public static final ConfigKey SystemVMUseLocalStorage = new ConfigKey(Boolean.class, "system.vm.use.local.storage", "Advanced", "false", "Indicates whether to use local storage pools or shared storage pools for system VMs.", false, ConfigKey.Scope.Zone, null); @@ -512,6 +517,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati public static final ConfigKey ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS = new ConfigKey<>(Boolean.class, "allow.domain.admins.to.create.tagged.offerings", "Advanced", "false", "Allow domain admins to create offerings with tags.", true, ConfigKey.Scope.Account, null); + public static final ConfigKey DELETE_QUERY_BATCH_SIZE = new ConfigKey<>("Advanced", Long.class, "delete.query.batch.size", "0", + "Indicates the limit applied while deleting entries in bulk. With this, the delete query will apply the limit as many times as necessary," + + " to delete all the entries. This is advised when retaining several days of records, which can lead to slowness. <= 0 means that no limit will " + + "be applied. Default value is 0. 
For now, this is used for deletion of vm & volume stats only.", true); + private static final String IOPS_READ_RATE = "IOPS Read"; private static final String IOPS_WRITE_RATE = "IOPS Write"; private static final String BYTES_READ_RATE = "Bytes Read"; @@ -522,6 +532,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati private static final Set VPC_ONLY_PROVIDERS = Sets.newHashSet(Provider.VPCVirtualRouter, Provider.JuniperContrailVpcRouter, Provider.InternalLbVm); + private static final List SUPPORTED_ROUTING_MODE_STRS = Arrays.asList(Static.toString().toLowerCase(), Dynamic.toString().toLowerCase()); private static final long GiB_TO_BYTES = 1024 * 1024 * 1024; @Override @@ -536,8 +547,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return true; } - private void populateConfigValuesForValidationSet() { - configValuesForValidation = new HashSet(); + protected void populateConfigValuesForValidationSet() { configValuesForValidation.add("event.purge.interval"); configValuesForValidation.add("account.cleanup.interval"); configValuesForValidation.add("alert.wait"); @@ -563,11 +573,12 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati configValuesForValidation.add(StorageManager.STORAGE_POOL_DISK_WAIT.key()); configValuesForValidation.add(StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.key()); configValuesForValidation.add(StorageManager.STORAGE_POOL_CLIENT_MAX_CONNECTIONS.key()); - configValuesForValidation.add(VM_USERDATA_MAX_LENGTH_STRING); + configValuesForValidation.add(UserDataManager.VM_USERDATA_MAX_LENGTH_STRING); + configValuesForValidation.add(UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.key()); + configValuesForValidation.add(UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout.key()); } - private void weightBasedParametersForValidation() { - weightBasedParametersForValidation = new HashSet(); + protected void weightBasedParametersForValidation() { 
weightBasedParametersForValidation.add(AlertManager.CPUCapacityThreshold.key()); weightBasedParametersForValidation.add(AlertManager.StorageAllocatedCapacityThreshold.key()); weightBasedParametersForValidation.add(AlertManager.StorageCapacityThreshold.key()); @@ -587,11 +598,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati weightBasedParametersForValidation.add(CapacityManager.SecondaryStorageCapacityThreshold.key()); weightBasedParametersForValidation.add(ClusterDrsService.ClusterDrsImbalanceThreshold.key()); weightBasedParametersForValidation.add(ClusterDrsService.ClusterDrsImbalanceSkipThreshold.key()); - } - private void overProvisioningFactorsForValidation() { - overprovisioningFactorsForValidation = new HashSet(); + protected void overProvisioningFactorsForValidation() { overprovisioningFactorsForValidation.add(CapacityManager.MemOverprovisioningFactor.key()); overprovisioningFactorsForValidation.add(CapacityManager.CpuOverprovisioningFactor.key()); overprovisioningFactorsForValidation.add(CapacityManager.StorageOverprovisioningFactor.key()); @@ -641,7 +650,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } if (err) { - throw new InvalidParameterValueException("Invalid IP address value(s) specified for the config value"); + throw new InvalidParameterValueException("Invalid IP address value(s) specified for the config value."); } } @@ -694,9 +703,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @DB public String updateConfiguration(final long userId, final String name, final String category, String value, final String scope, final Long resourceId) { final String validationMsg = validateConfigurationValue(name, value, scope); - if (validationMsg != null) { - logger.error("Invalid configuration option, name: " + name + ", value:" + value); + logger.error("Invalid value [{}] for configuration [{}] due to [{}].", value, name, validationMsg); throw new 
InvalidParameterValueException(validationMsg); } @@ -710,7 +718,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati value = DBEncryptionUtil.encrypt(value); } - switch (ConfigKey.Scope.valueOf(scope)) { + ConfigKey.Scope scopeVal = ConfigKey.Scope.valueOf(scope); + switch (scopeVal) { case Zone: final DataCenterVO zone = _zoneDao.findById(resourceId); if (zone == null) { @@ -801,6 +810,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Scope provided is invalid"); } + _configDepot.invalidateConfigCache(name, scopeVal, resourceId); return valueEncrypted ? DBEncryptionUtil.decrypt(value) : value; } @@ -813,6 +823,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati logger.error("Failed to update configuration option, name: " + name + ", value:" + value); throw new CloudRuntimeException("Failed to update configuration value. Please contact Cloud Support."); } + _configDepot.invalidateConfigCache(name, ConfigKey.Scope.Global, null); PreparedStatement pstmt = null; if (Config.XenServerGuestNetwork.key().equalsIgnoreCase(name)) { @@ -1101,7 +1112,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } String newValue = null; - switch (ConfigKey.Scope.valueOf(scope)) { + ConfigKey.Scope scopeVal = ConfigKey.Scope.valueOf(scope); + switch (scopeVal) { case Zone: final DataCenterVO zone = _zoneDao.findById(id); if (zone == null) { @@ -1186,19 +1198,27 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati newValue = optionalValue.isPresent() ? optionalValue.get().toString() : defaultValue; } + _configDepot.invalidateConfigCache(name, scopeVal, id); + CallContext.current().setEventDetails(" Name: " + name + " New Value: " + (name.toLowerCase().contains("password") ? "*****" : defaultValue == null ? 
"" : defaultValue)); return new Pair(_configDao.findByName(name), newValue); } - private String validateConfigurationValue(final String name, String value, final String scope) { - + /** + * Validates whether a value is valid for the specified configuration. This includes type and range validation. + * @param name name of the configuration. + * @param value value to validate. + * @param scope scope of the configuration. + * @return null if the value is valid; otherwise, returns an error message. + */ + protected String validateConfigurationValue(String name, String value, String scope) { final ConfigurationVO cfg = _configDao.findByName(name); if (cfg == null) { logger.error("Missing configuration variable " + name + " in configuration table"); return "Invalid configuration variable."; } - final String configScope = cfg.getScope(); + String configScope = cfg.getScope(); if (scope != null) { if (!configScope.contains(scope) && !(ENABLE_ACCOUNT_SETTINGS_FOR_DOMAIN.value() && configScope.contains(ConfigKey.Scope.Account.toString()) && @@ -1207,11 +1227,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return "Invalid scope id provided for the parameter " + name; } } - Class type = null; - final Config configuration = Config.getConfig(name); + Class type; + Config configuration = Config.getConfig(name); if (configuration == null) { logger.warn("Did not find configuration " + name + " in Config.java. Perhaps moved to ConfigDepot"); - final ConfigKey configKey = _configDepot.get(name); + ConfigKey configKey = _configDepot.get(name); if(configKey == null) { logger.warn("Did not find configuration " + name + " in ConfigDepot too."); return null; @@ -1220,130 +1240,161 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } else { type = configuration.getType(); } - //no need to validate further if a - //config can have null value. 
- String errMsg = null; + + boolean isTypeValid = validateValueType(value, type); + if (!isTypeValid) { + return String.format("Value [%s] is not a valid [%s].", value, type); + } + + return validateValueRange(name, value, type, configuration); + } + + /** + * Returns whether a value is valid for a configuration of the provided type. + * Valid configuration values are: + * + *
      + *
    • String: any value, including null;
    • + *
    • Character: any value, including null;
    • + *
    • Boolean: strings that equal "true" or "false" (case-sensitive);
    • + *
    • Integer, Short, Long: strings that contain a valid int/short/long;
    • + *
    • Float, Double: strings that contain a valid float/double, except infinity.
    • + *
    + * + * If a type isn't listed here, then the value will be considered invalid. + * @param value value to validate. + * @param type type of the configuration. + * @return boolean indicating whether the value is valid. + */ + protected boolean validateValueType(String value, Class type) { + if (type == String.class || type == Character.class) { + return true; + } + try { - if (type.equals(Integer.class)) { - errMsg = "There was error in trying to parse value: " + value + ". Please enter a valid integer value for parameter " + name; + if (type == Boolean.class) { + return value.equals("true") || value.equals("false"); + } else if (type == Integer.class) { Integer.parseInt(value); - } else if (type.equals(Float.class)) { - errMsg = "There was error in trying to parse value: " + value + ". Please enter a valid float value for parameter " + name; - Float.parseFloat(value); - } else if (type.equals(Long.class)) { - errMsg = "There was error in trying to parse value: " + value + ". Please enter a valid long value for parameter " + name; + } else if (type == Long.class) { Long.parseLong(value); + } else if (type == Short.class) { + Short.parseShort(value); + } else if (type == Float.class) { + float floatValue = Float.parseFloat(value); + return !Float.isInfinite(floatValue); + } else if (type == Double.class) { + double doubleValue = Double.parseDouble(value); + return !Double.isInfinite(doubleValue); + } else { + return false; + } + return true; + } catch (NullPointerException | NumberFormatException e) { + return false; + } + } + + /** + * If the specified configuration contains a range, validates if the value is in that range. If it doesn't contain + * a range, any value is considered valid. + * The value must be previously checked by `validateValueType` so there aren't casting exceptions here. + * @param name name of the configuration. + * @param value value to validate. + * @param type type of the value. 
+ * @param configuration if the configuration uses Config instead of ConfigKey, the Config object; null otherwise. + * @return if the value is valid, returns null; if not, returns an error message. + */ + protected String validateValueRange(String name, String value, Class type, Config configuration) { + if (type.equals(Float.class)) { + Float val = Float.parseFloat(value); + if (overprovisioningFactorsForValidation.contains(name) && val <= 0f) { + return String.format("Value for configuration [%s] should be greater than 0.", name); + } else if (weightBasedParametersForValidation.contains(name) && (val < 0f || val > 1f)) { + return String.format("Please enter a value between 0 and 1 for the configuration parameter: [%s].", name); } - } catch (final Exception e) { - // catching generic exception as some throws NullPointerException and some throws NumberFormatExcpeion - logger.error(errMsg); - return errMsg; } - if (value == null) { - if (type.equals(Boolean.class)) { - return "Please enter either 'true' or 'false'."; - } - if (overprovisioningFactorsForValidation.contains(name)) { - final String msg = "value cannot be null for the parameter " + name; - logger.error(msg); - return msg; - } - return null; - } - - value = value.trim(); - try { - if (overprovisioningFactorsForValidation.contains(name) && Float.parseFloat(value) <= 0f) { - final String msg = name + " should be greater than 0"; - logger.error(msg); - throw new InvalidParameterValueException(msg); - } - } catch (final NumberFormatException e) { - final String msg = "There was an error trying to parse the float value for: " + name; - logger.error(msg); - throw new InvalidParameterValueException(msg); - } - - if (type.equals(Boolean.class)) { - if (!(value.equals("true") || value.equals("false"))) { - logger.error("Configuration variable " + name + " is expecting true or false instead of " + value); - return "Please enter either 'true' or 'false'."; - } - return null; - } - - if (type.equals(Integer.class) 
&& NetworkModel.MACIdentifier.key().equalsIgnoreCase(name)) { - try { - final int val = Integer.parseInt(value); - //The value need to be between 0 to 255 because the mac generation needs a value of 8 bit - //0 value is considered as disable. - if(val < 0 || val > 255){ - throw new InvalidParameterValueException(name+" value should be between 0 and 255. 0 value will disable this feature"); + if (type.equals(Integer.class)) { + int val = Integer.parseInt(value); + if (NetworkModel.MACIdentifier.key().equalsIgnoreCase(name)) { + // The value needs to be between 0 to 255 because the MAC generation needs a value of 8 bits + // 0 is considered as disabled. + if (val < 0 || val > 255){ + return String.format("[%s] value should be between 0 and 255. 0 value will disable this feature.", name); } - } catch (final NumberFormatException e) { - logger.error("There was an error trying to parse the integer value for:" + name); - throw new InvalidParameterValueException("There was an error trying to parse the integer value for:" + name); } - } - - if (type.equals(Integer.class) && configValuesForValidation.contains(name)) { - try { - final int val = Integer.parseInt(value); + if (UnmanagedVMsManager.ThreadsOnMSToImportVMwareVMFiles.key().equalsIgnoreCase(name) || + UnmanagedVMsManager.ThreadsOnKVMHostToImportVMwareVMFiles.key().equalsIgnoreCase(name)) { + if (val < -1 || val > 10) { + return String.format("Please enter a value between -1 and 10 for the configuration parameter: [%s]. 
-1 will disable it.", name); + } + } else if (configValuesForValidation.contains(name)) { if (val <= 0) { - throw new InvalidParameterValueException("Please enter a positive value for the configuration parameter:" + name); + return String.format("Please enter a positive value for the configuration parameter: [%s].", name); } if ("vm.password.length".equalsIgnoreCase(name) && val < 6) { - throw new InvalidParameterValueException("Please enter a value greater than 5 for the configuration parameter:" + name); + return String.format("Please enter a value greater than 5 for the configuration parameter: [%s].", name); } - if ("remote.access.vpn.psk.length".equalsIgnoreCase(name)) { - if (val < 8) { - throw new InvalidParameterValueException("Please enter a value greater than 7 for the configuration parameter:" + name); - } - if (val > 256) { - throw new InvalidParameterValueException("Please enter a value less than 257 for the configuration parameter:" + name); - } + if ("remote.access.vpn.psk.length".equalsIgnoreCase(name) && (val < 8 || val > 256)) { + return String.format("Please enter a value greater than 7 and less than 257 for the configuration parameter: [%s].", name); } - if (VM_USERDATA_MAX_LENGTH_STRING.equalsIgnoreCase(name)) { - if (val > 1048576) { - throw new InvalidParameterValueException("Please enter a value less than 1048576 for the configuration parameter:" + name); - } + if (UserDataManager.VM_USERDATA_MAX_LENGTH_STRING.equalsIgnoreCase(name) && val > 1048576) { + return String.format("Please enter a value less than 1048577 for the configuration parameter: [%s].", name); } - } catch (final NumberFormatException e) { - logger.error("There was an error trying to parse the integer value for:" + name); - throw new InvalidParameterValueException("There was an error trying to parse the integer value for:" + name); } } - if (type.equals(Float.class)) { - try { - final Float val = Float.parseFloat(value); - if 
(weightBasedParametersForValidation.contains(name) && (val < 0f || val > 1f)) { - throw new InvalidParameterValueException("Please enter a value between 0 and 1 for the configuration parameter: " + name); + if (type.equals(String.class)) { + if (SecStorageAllowedInternalDownloadSites.key().equalsIgnoreCase(name) && StringUtils.isNotEmpty(value)) { + final String[] cidrs = value.split(","); + for (final String cidr : cidrs) { + if (!NetUtils.isValidIp4(cidr) && !NetUtils.isValidIp6(cidr) && !NetUtils.getCleanIp4Cidr(cidr).equals(cidr)) { + return String.format("Invalid CIDR %s value specified for the config %s.", cidr, name); + } } - } catch (final NumberFormatException e) { - logger.error("There was an error trying to parse the float value for:" + name); - throw new InvalidParameterValueException("There was an error trying to parse the float value for:" + name); } } - if (configuration == null ) { - //range validation has to be done per case basis, for now - //return in case of Configkey parameters + validateIpAddressRelatedConfigValues(name, value); + + if (!shouldValidateConfigRange(name, value, configuration)) { return null; } + String[] range = configuration.getRange().split(","); + if (type.equals(Integer.class)) { + return validateIfIntValueIsInRange(name, value, range[0]); + } + return validateIfStringValueIsInRange(name, value, range); + } + + /** + * Returns a boolean indicating whether a Config's range should be validated. It should not be validated when:
    + *
      + *
    • The value is null;
    • + *
    • The configuration uses ConfigKey instead of Config;
    • + *
    • The Config does not have a specified range.
    • + *
    + */ + protected boolean shouldValidateConfigRange(String name, String value, Config configuration) { + if (value == null) { + logger.debug("Not proceeding with configuration [{}]'s range validation, as its provided value is null.", name); + return false; + } + + if (configuration == null) { + logger.debug("Not proceeding with configuration [{}]'s range validation, as it uses ConfigKey instead of Config.", name); + return false; + } + if (configuration.getRange() == null) { - return null; + logger.debug("Not proceeding with configuration [{}]'s range validation, as it does not have a specified range.", name); + return false; } - String[] range = configuration.getRange().split(","); - if (type.equals(String.class)) { - return validateIfStringValueIsInRange(name, value, range); - } else if (type.equals(Integer.class)) { - return validateIfIntValueIsInRange(name, value, range[0]); - } - return String.format("Invalid value for configuration [%s].", name); + logger.debug("Proceeding with configuration [{}]'s range validation.", name); + return true; } /** @@ -3200,7 +3251,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati cmd.getIopsReadRate(), cmd.getIopsReadRateMax(), cmd.getIopsReadRateMaxLength(), cmd.getIopsWriteRate(), cmd.getIopsWriteRateMax(), cmd.getIopsWriteRateMaxLength(), cmd.getHypervisorSnapshotReserve(), cmd.getCacheMode(), storagePolicyId, cmd.getDynamicScalingEnabled(), diskOfferingId, - cmd.getDiskOfferingStrictness(), cmd.isCustomized(), cmd.getEncryptRoot()); + cmd.getDiskOfferingStrictness(), cmd.isCustomized(), cmd.getEncryptRoot(), cmd.isPurgeResources()); } protected ServiceOfferingVO createServiceOffering(final long userId, final boolean isSystem, final VirtualMachine.Type vmType, @@ -3211,8 +3262,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati Long bytesWriteRate, Long bytesWriteRateMax, Long bytesWriteRateMaxLength, Long iopsReadRate, Long iopsReadRateMax, Long 
iopsReadRateMaxLength, Long iopsWriteRate, Long iopsWriteRateMax, Long iopsWriteRateMaxLength, - final Integer hypervisorSnapshotReserve, String cacheMode, final Long storagePolicyID, final boolean dynamicScalingEnabled, final Long diskOfferingId, - final boolean diskOfferingStrictness, final boolean isCustomized, final boolean encryptRoot) { + final Integer hypervisorSnapshotReserve, String cacheMode, final Long storagePolicyID, + final boolean dynamicScalingEnabled, final Long diskOfferingId, final boolean diskOfferingStrictness, + final boolean isCustomized, final boolean encryptRoot, final boolean purgeResources) { // Filter child domains when both parent and child domains are present List filteredDomainIds = filterChildSubDomains(domainIds); @@ -3247,7 +3299,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati limitResourceUse, volatileVm, displayText, isSystem, vmType, hostTag, deploymentPlanner, dynamicScalingEnabled, isCustomized); - List detailsVO = new ArrayList(); + List detailsVOList = new ArrayList(); if (details != null) { // To have correct input, either both gpu card name and VGPU type should be passed or nothing should be passed. // Use XOR condition to verify that. 
@@ -3281,22 +3333,25 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Add in disk offering details continue; } - detailsVO.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), detailEntry.getKey(), detailEntryValue, true)); + detailsVOList.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), detailEntry.getKey(), detailEntryValue, true)); } } if (storagePolicyID != null) { - detailsVO.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), ApiConstants.STORAGE_POLICY, String.valueOf(storagePolicyID), false)); + detailsVOList.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), ApiConstants.STORAGE_POLICY, String.valueOf(storagePolicyID), false)); + } + if (purgeResources) { + detailsVOList.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), + ServiceOffering.PURGE_DB_ENTITIES_KEY, Boolean.TRUE.toString(), false)); } serviceOffering.setDiskOfferingStrictness(diskOfferingStrictness); DiskOfferingVO diskOffering = null; if (diskOfferingId == null) { - diskOffering = createDiskOfferingInternal(userId, isSystem, vmType, - name, cpu, ramSize, speed, displayText, typedProvisioningType, localStorageRequired, - offerHA, limitResourceUse, volatileVm, tags, domainIds, zoneIds, hostTag, - networkRate, deploymentPlanner, details, rootDiskSizeInGiB, isCustomizedIops, minIops, maxIops, + diskOffering = createDiskOfferingInternal( + name, displayText, typedProvisioningType, localStorageRequired, + tags, details, rootDiskSizeInGiB, isCustomizedIops, minIops, maxIops, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength, bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, @@ -3304,6 +3359,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati hypervisorSnapshotReserve, cacheMode, storagePolicyID, encryptRoot); } else { diskOffering = _diskOfferingDao.findById(diskOfferingId); + String diskStoragePolicyId = 
diskOfferingDetailsDao.getDetail(diskOfferingId, ApiConstants.STORAGE_POLICY); + if (storagePolicyID != null && diskStoragePolicyId != null) { + throw new InvalidParameterValueException("Storage policy cannot be defined on both compute and disk offering"); + } } if (diskOffering != null) { serviceOffering.setDiskOfferingId(diskOffering.getId()); @@ -3313,18 +3372,18 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if ((serviceOffering = _serviceOfferingDao.persist(serviceOffering)) != null) { for (Long domainId : filteredDomainIds) { - detailsVO.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), ApiConstants.DOMAIN_ID, String.valueOf(domainId), false)); + detailsVOList.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), ApiConstants.DOMAIN_ID, String.valueOf(domainId), false)); } if (CollectionUtils.isNotEmpty(zoneIds)) { for (Long zoneId : zoneIds) { - detailsVO.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); + detailsVOList.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); } } - if (CollectionUtils.isNotEmpty(detailsVO)) { - for (ServiceOfferingDetailsVO detail : detailsVO) { + if (CollectionUtils.isNotEmpty(detailsVOList)) { + for (ServiceOfferingDetailsVO detail : detailsVOList) { detail.setResourceId(serviceOffering.getId()); } - _serviceOfferingDetailsDao.saveDetails(detailsVO); + _serviceOfferingDetailsDao.saveDetails(detailsVOList); } CallContext.current().setEventDetails("Service offering id=" + serviceOffering.getId()); @@ -3343,10 +3402,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - private DiskOfferingVO createDiskOfferingInternal(final long userId, final boolean isSystem, final VirtualMachine.Type vmType, - final String name, final Integer cpu, final Integer ramSize, final Integer speed, final String displayText, final ProvisioningType 
typedProvisioningType, final boolean localStorageRequired, - final boolean offerHA, final boolean limitResourceUse, final boolean volatileVm, String tags, final List domainIds, List zoneIds, final String hostTag, - final Integer networkRate, final String deploymentPlanner, final Map details, Long rootDiskSizeInGiB, final Boolean isCustomizedIops, Long minIops, Long maxIops, + private DiskOfferingVO createDiskOfferingInternal(final String name, final String displayText, final ProvisioningType typedProvisioningType, final boolean localStorageRequired, + String tags, final Map details, Long rootDiskSizeInGiB, final Boolean isCustomizedIops, Long minIops, Long maxIops, Long bytesReadRate, Long bytesReadRateMax, Long bytesReadRateMaxLength, Long bytesWriteRate, Long bytesWriteRateMax, Long bytesWriteRateMaxLength, Long iopsReadRate, Long iopsReadRateMax, Long iopsReadRateMaxLength, @@ -3407,8 +3464,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati diskOffering.setHypervisorSnapshotReserve(hypervisorSnapshotReserve); if ((diskOffering = _diskOfferingDao.persist(diskOffering)) != null) { - if (details != null && !details.isEmpty()) { - List diskDetailsVO = new ArrayList(); + if ((details != null && !details.isEmpty()) || (storagePolicyID != null)) { + List diskDetailsVO = new ArrayList<>(); // Support disk offering details for below parameters if (details.containsKey(Volume.BANDWIDTH_LIMIT_IN_MBPS)) { diskDetailsVO.add(new DiskOfferingDetailVO(diskOffering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, details.get(Volume.BANDWIDTH_LIMIT_IN_MBPS), false)); @@ -3416,6 +3473,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (details.containsKey(Volume.IOPS_LIMIT)) { diskDetailsVO.add(new DiskOfferingDetailVO(diskOffering.getId(), Volume.IOPS_LIMIT, details.get(Volume.IOPS_LIMIT), false)); } + + if (storagePolicyID != null) { + diskDetailsVO.add(new DiskOfferingDetailVO(diskOffering.getId(), 
ApiConstants.STORAGE_POLICY, String.valueOf(storagePolicyID), false)); + } + if (!diskDetailsVO.isEmpty()) { diskOfferingDetailsDao.saveDetails(diskDetailsVO); } @@ -3481,6 +3543,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String storageTags = cmd.getStorageTags(); String hostTags = cmd.getHostTags(); ServiceOffering.State state = cmd.getState(); + boolean purgeResources = cmd.isPurgeResources(); if (userId == null) { userId = Long.valueOf(User.UID_SYSTEM); @@ -3498,6 +3561,12 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati List existingZoneIds = _serviceOfferingDetailsDao.findZoneIds(id); Collections.sort(existingZoneIds); + String purgeResourceStr = _serviceOfferingDetailsDao.getDetail(id, ServiceOffering.PURGE_DB_ENTITIES_KEY); + boolean existingPurgeResources = false; + if (StringUtils.isNotBlank(purgeResourceStr)) { + existingPurgeResources = Boolean.parseBoolean(purgeResourceStr); + } + // check if valid domain if (CollectionUtils.isNotEmpty(domainIds)) { for (final Long domainId: domainIds) { @@ -3566,7 +3635,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } final boolean updateNeeded = name != null || displayText != null || sortKey != null || storageTags != null || hostTags != null || state != null; - final boolean detailsUpdateNeeded = !filteredDomainIds.equals(existingDomainIds) || !filteredZoneIds.equals(existingZoneIds); + final boolean detailsUpdateNeeded = !filteredDomainIds.equals(existingDomainIds) || + !filteredZoneIds.equals(existingZoneIds) || purgeResources != existingPurgeResources; if (!updateNeeded && !detailsUpdateNeeded) { return _serviceOfferingDao.findById(id); } @@ -3625,6 +3695,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati detailsVO.add(new ServiceOfferingDetailsVO(id, ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); } } + if (purgeResources != existingPurgeResources) { + 
sc.setParameters("detailName", ServiceOffering.PURGE_DB_ENTITIES_KEY); + _serviceOfferingDetailsDao.remove(sc); + if (purgeResources) { + detailsVO.add(new ServiceOfferingDetailsVO(id, ServiceOffering.PURGE_DB_ENTITIES_KEY, + "true", false)); + } + } } if (!detailsVO.isEmpty()) { for (ServiceOfferingDetailsVO detailVO : detailsVO) { @@ -4782,6 +4860,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati newIp6Gateway = MoreObjects.firstNonNull(newIp6Gateway, network.getIp6Gateway()); newIp6Cidr = MoreObjects.firstNonNull(newIp6Cidr, network.getIp6Cidr()); _networkModel.checkIp6Parameters(newIp6StartIp, newIp6EndIp, newIp6Gateway, newIp6Cidr); + if (!GuestType.Shared.equals(network.getGuestType())) { + _networkModel.checkIp6CidrSizeEqualTo64(newIp6Cidr); + } return true; } return false; @@ -5260,6 +5341,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati endIpv6 = ObjectUtils.allNull(endIpv6, currentEndIPv6) ? null : MoreObjects.firstNonNull(endIpv6, currentEndIPv6); _networkModel.checkIp6Parameters(startIpv6, endIpv6, ip6Gateway, ip6Cidr); + final Network network = _networkModel.getNetwork(vlanRange.getNetworkId()); + if (!GuestType.Shared.equals(network.getGuestType())) { + _networkModel.checkIp6CidrSizeEqualTo64(ip6Cidr); + } if (!ObjectUtils.allNull(startIpv6, endIpv6) && ObjectUtils.anyNull(startIpv6, endIpv6)) { throw new InvalidParameterValueException(String.format("Invalid IPv6 range %s-%s", startIpv6, endIpv6)); @@ -6055,7 +6140,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati Boolean forVpc = cmd.getForVpc(); Boolean forNsx = cmd.isForNsx(); Boolean forTungsten = cmd.getForTungsten(); - String nsxMode = cmd.getNsxMode(); + String networkModeStr = cmd.getNetworkMode(); boolean nsxSupportInternalLbSvc = cmd.getNsxSupportsInternalLbService(); Integer maxconn = null; boolean enableKeepAlive = false; @@ -6063,6 +6148,8 @@ public class ConfigurationManagerImpl 
extends ManagerBase implements Configurati final List domainIds = cmd.getDomainIds(); final List zoneIds = cmd.getZoneIds(); final boolean enable = cmd.getEnable(); + boolean specifyAsNumber = cmd.getSpecifyAsNumber(); + String routingModeString = cmd.getRoutingMode(); // check if valid domain if (CollectionUtils.isNotEmpty(domainIds)) { for (final Long domainId: domainIds) { @@ -6094,20 +6181,12 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Network Offering cannot be for both Tungsten-Fabric and NSX"); } - if (Boolean.TRUE.equals(forNsx)) { - if (Objects.isNull(nsxMode)) { - throw new InvalidParameterValueException("Mode for an NSX offering needs to be specified. Valid values: " + Arrays.toString(NetworkOffering.NsxMode.values())); - } - if (!EnumUtils.isValidEnum(NetworkOffering.NsxMode.class, nsxMode)) { - throw new InvalidParameterValueException("Invalid mode passed. Valid values: " + Arrays.toString(NetworkOffering.NsxMode.values())); - } - } else { - if (Objects.nonNull(nsxMode)) { - if (logger.isTraceEnabled()) { - logger.trace("nsxMode has is ignored for non-NSX enabled zones"); - } - nsxMode = null; + NetworkOffering.NetworkMode networkMode = null; + if (networkModeStr != null) { + if (!EnumUtils.isValidEnum(NetworkOffering.NetworkMode.class, networkModeStr)) { + throw new InvalidParameterValueException("Invalid mode passed. 
Valid values: " + Arrays.toString(NetworkOffering.NetworkMode.values())); } + networkMode = NetworkOffering.NetworkMode.valueOf(networkModeStr); } // Verify traffic type @@ -6169,6 +6248,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati _networkSvc.validateIfServiceOfferingIsActiveAndSystemVmTypeIsDomainRouter(serviceOfferingId); } + NetworkOffering.RoutingMode routingMode = verifyRoutingMode(routingModeString); + // configure service provider map final Map> serviceProviderMap = new HashMap>(); final Set defaultProviders = new HashSet(); @@ -6338,6 +6419,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati serviceCapabilityMap.put(Service.StaticNat, staticNatServiceCapabilityMap); serviceCapabilityMap.put(Service.Connectivity, connectivityServiceCapabilityMap); + final Map gatewayServiceCapabilityMap = cmd.getServiceCapabilities(Service.Gateway); + if (MapUtils.isNotEmpty(gatewayServiceCapabilityMap)) { + serviceCapabilityMap.put(Service.Gateway, gatewayServiceCapabilityMap); + } + // if Firewall service is missing, add Firewall service/provider // combination if (firewallProvider != null) { @@ -6374,7 +6460,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } final NetworkOfferingVO offering = createNetworkOffering(name, displayText, trafficType, tags, specifyVlan, availability, networkRate, serviceProviderMap, false, guestType, false, - serviceOfferingId, conserveMode, serviceCapabilityMap, specifyIpRanges, isPersistent, details, egressDefaultPolicy, maxconn, enableKeepAlive, forVpc, forTungsten, forNsx, nsxMode, domainIds, zoneIds, enable, internetProtocol); + serviceOfferingId, conserveMode, serviceCapabilityMap, specifyIpRanges, isPersistent, details, egressDefaultPolicy, maxconn, enableKeepAlive, forVpc, forTungsten, forNsx, networkMode, domainIds, zoneIds, enable, internetProtocol, routingMode, specifyAsNumber); if (Boolean.TRUE.equals(forNsx) && 
nsxSupportInternalLbSvc) { offering.setInternalLb(true); offering.setPublicLb(false); @@ -6385,6 +6471,23 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return offering; } + public static NetworkOffering.RoutingMode verifyRoutingMode(String routingModeString) { + NetworkOffering.RoutingMode routingMode = null; + if (routingModeString != null) { + try { + if (!SUPPORTED_ROUTING_MODE_STRS.contains(routingModeString.toLowerCase())) { + throw new IllegalArgumentException(String.format("Unsupported value: %s", routingModeString)); + } + routingMode = routingModeString.equalsIgnoreCase(Static.toString()) ? Static : Dynamic; + } catch (IllegalArgumentException e) { + String msg = String.format("Invalid value %s for Routing Mode, Supported values: %s, %s.", + routingModeString, Static, Dynamic); + throw new InvalidParameterValueException(msg); + } + } + return routingMode; + } + void validateLoadBalancerServiceCapabilities(final Map lbServiceCapabilityMap) { if (lbServiceCapabilityMap != null && !lbServiceCapabilityMap.isEmpty()) { if (lbServiceCapabilityMap.keySet().size() > 4 || !lbServiceCapabilityMap.containsKey(Capability.SupportedLBIsolation)) { @@ -6524,7 +6627,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final Long serviceOfferingId, final boolean conserveMode, final Map> serviceCapabilityMap, final boolean specifyIpRanges, final boolean isPersistent, final Map details, final boolean egressDefaultPolicy, final Integer maxconn, final boolean enableKeepAlive, Boolean forVpc, - Boolean forTungsten, boolean forNsx, String mode, final List domainIds, final List zoneIds, final boolean enableOffering, final NetUtils.InternetProtocol internetProtocol) { + Boolean forTungsten, boolean forNsx, NetworkOffering.NetworkMode networkMode, final List domainIds, final List zoneIds, final boolean enableOffering, final NetUtils.InternetProtocol internetProtocol, + final NetworkOffering.RoutingMode 
routingMode, final boolean specifyAsNumber) { String servicePackageUuid; String spDescription = null; @@ -6555,11 +6659,63 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } + if (specifyAsNumber && !forNsx) { + String msg = "SpecifyAsNumber can only be true for network offerings for NSX"; + logger.error(msg); + throw new InvalidParameterValueException(msg); + } + + if (specifyAsNumber && !Dynamic.equals(routingMode)) { + String msg = "SpecifyAsNumber can only be true for Dynamic Route Mode network offerings"; + logger.error(msg); + throw new InvalidParameterValueException(msg); + } + + if (specifyAsNumber && Boolean.TRUE.equals(forVpc)) { + String msg = "SpecifyAsNumber cannot be set for VPC network tiers. It needs to be defined at VPC level"; + logger.error(msg); + throw new InvalidParameterValueException(msg); + } + // isPersistent should always be false for Shared network Offerings if (isPersistent && type == GuestType.Shared) { throw new InvalidParameterValueException("isPersistent should be false if network offering's type is " + type); } + // Validate network mode + if (networkMode != null) { + if (type != GuestType.Isolated) { + throw new InvalidParameterValueException("networkMode should be set only for Isolated network offerings"); + } + if (NetworkOffering.NetworkMode.ROUTED.equals(networkMode)) { + boolean useVirtualRouterOnly = true; + for (Service service : serviceProviderMap.keySet()) { + Set providers = serviceProviderMap.get(service); + if (Arrays.asList(Service.SourceNat, Service.StaticNat, Service.Lb, Service.PortForwarding, Service.Vpn).contains(service)) { + if (providers != null) { + throw new InvalidParameterValueException("SourceNat/StaticNat/Lb/PortForwarding/Vpn service are not supported in ROUTED mode"); + } + } + if (useVirtualRouterOnly && Arrays.asList(Service.Firewall, Service.NetworkACL).contains(service)) { + for (Provider provider : providers) { + if (!Provider.VirtualRouter.equals(provider) && 
!Provider.VPCVirtualRouter.equals(provider)) { + useVirtualRouterOnly = false; + break; + } + } + } + } + if (useVirtualRouterOnly) { + // Add VirtualRouter/VPCVirtualRouter as provider of Gateway service + if (forVpc) { + serviceProviderMap.put(Service.Gateway, Sets.newHashSet(Provider.VPCVirtualRouter)); + } else { + serviceProviderMap.put(Service.Gateway, Sets.newHashSet(Provider.VirtualRouter)); + } + } + } + } + // validate availability value if (availability == NetworkOffering.Availability.Required) { final boolean canOffBeRequired = type == GuestType.Isolated && serviceProviderMap.containsKey(Service.SourceNat); @@ -6636,9 +6792,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } final Map sourceNatServiceCapabilityMap = serviceCapabilityMap.get(Service.SourceNat); - if (sourceNatServiceCapabilityMap != null && !sourceNatServiceCapabilityMap.isEmpty()) { + if (MapUtils.isNotEmpty(sourceNatServiceCapabilityMap)) { sharedSourceNat = isSharedSourceNat(serviceProviderMap, sourceNatServiceCapabilityMap); - redundantRouter = isRedundantRouter(serviceProviderMap, sourceNatServiceCapabilityMap); + redundantRouter = isRedundantRouter(serviceProviderMap.get(Service.SourceNat), Service.SourceNat, sourceNatServiceCapabilityMap); + } + + final Map gatewayServiceCapabilityMap = serviceCapabilityMap.get(Service.Gateway); + if (MapUtils.isNotEmpty(gatewayServiceCapabilityMap)) { + redundantRouter = redundantRouter || isRedundantRouter(serviceProviderMap.get(Service.Gateway), Service.Gateway, gatewayServiceCapabilityMap); } final Map staticNatServiceCapabilityMap = serviceCapabilityMap.get(Service.StaticNat); @@ -6686,14 +6847,17 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati offeringFinal.setForTungsten(Objects.requireNonNullElse(forTungsten, false)); offeringFinal.setForNsx(Objects.requireNonNullElse(forNsx, false)); - if (Boolean.TRUE.equals(forNsx)) { - offeringFinal.setNsxMode(mode); - } + 
offeringFinal.setNetworkMode(networkMode); if (enableOffering) { offeringFinal.setState(NetworkOffering.State.Enabled); } + offeringFinal.setSpecifyAsNumber(specifyAsNumber); + if (routingMode != null) { + offeringFinal.setRoutingMode(routingMode); + } + // Set VM AutoScaling capability offeringFinal.setSupportsVmAutoScaling(vmAutoScaling); @@ -6792,11 +6956,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati }); } - boolean isRedundantRouter(Map> serviceProviderMap, Map sourceNatServiceCapabilityMap) { + boolean isRedundantRouter(Set providers, Service service, Map sourceNatServiceCapabilityMap) { boolean redundantRouter = false; String param = sourceNatServiceCapabilityMap.get(Capability.RedundantRouter); if (param != null) { - _networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), Service.SourceNat, Capability.RedundantRouter, param); + _networkModel.checkCapabilityForProvider(providers, service, Capability.RedundantRouter, param); redundantRouter = param.contains("true"); } return redundantRouter; @@ -6874,6 +7038,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati final String tags = cmd.getTags(); final Boolean isTagged = cmd.isTagged(); final Boolean forVpc = cmd.getForVpc(); + final String routingMode = cmd.getRoutingMode(); if (domainId != null) { Domain domain = _entityMgr.findById(Domain.class, domainId); @@ -6947,6 +7112,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } + if (routingMode != null && EnumUtils.isValidEnumIgnoreCase(NetworkOffering.RoutingMode.class, routingMode)) { + sc.addAnd("routingMode", SearchCriteria.Op.EQ, routingMode); + } + // Don't return system network offerings to the user sc.addAnd("systemOnly", SearchCriteria.Op.EQ, false); @@ -7845,9 +8014,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override public ConfigKey[] getConfigKeys() { return new 
ConfigKey[] {SystemVMUseLocalStorage, IOPS_MAX_READ_LENGTH, IOPS_MAX_WRITE_LENGTH, - BYTES_MAX_READ_LENGTH, BYTES_MAX_WRITE_LENGTH, ADD_HOST_ON_SERVICE_RESTART_KVM, SET_HOST_DOWN_TO_MAINTENANCE, VM_SERVICE_OFFERING_MAX_CPU_CORES, - VM_SERVICE_OFFERING_MAX_RAM_SIZE, VM_USERDATA_MAX_LENGTH, MIGRATE_VM_ACROSS_CLUSTERS, ENABLE_ACCOUNT_SETTINGS_FOR_DOMAIN, - ENABLE_DOMAIN_SETTINGS_FOR_CHILD_DOMAIN, ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS, AllowNonRFC1918CompliantIPs + BYTES_MAX_READ_LENGTH, BYTES_MAX_WRITE_LENGTH, ADD_HOST_ON_SERVICE_RESTART_KVM, SET_HOST_DOWN_TO_MAINTENANCE, + VM_SERVICE_OFFERING_MAX_CPU_CORES, VM_SERVICE_OFFERING_MAX_RAM_SIZE, MIGRATE_VM_ACROSS_CLUSTERS, + ENABLE_ACCOUNT_SETTINGS_FOR_DOMAIN, ENABLE_DOMAIN_SETTINGS_FOR_CHILD_DOMAIN, + ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS, DELETE_QUERY_BATCH_SIZE, AllowNonRFC1918CompliantIPs }; } diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java index 2e45b0f745b..76e019df1b3 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.consoleproxy.ConsoleAccessManager; - import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.security.keys.KeysManager; import org.apache.cloudstack.framework.security.keystore.KeystoreManager; @@ -31,6 +30,7 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.GetVncPortAnswer; import com.cloud.agent.api.GetVncPortCommand; import com.cloud.agent.api.StartupProxyCommand; +import com.cloud.deploy.DeploymentPlanner; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.info.ConsoleProxyInfo; @@ -40,7 +40,9 @@ import 
com.cloud.utils.component.ManagerBase; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.ConsoleProxyDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; @@ -116,7 +118,7 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol _consoleProxyPort = NumbersUtil.parseInt(value, ConsoleProxyManager.DEFAULT_PROXY_VNC_PORT); } - value = configs.get("consoleproxy.sslEnabled"); + value = configs.get(ConsoleProxySslEnabled.key()); if (value != null && value.equalsIgnoreCase("true")) { _sslEnabled = true; } @@ -180,6 +182,11 @@ public class AgentBasedConsoleProxyManager extends ManagerBase implements Consol return null; } + @Override + public void startProxyForHA(VirtualMachine vm, Map params, + DeploymentPlanner planner) { + } + @Override public boolean destroyProxy(long proxyVmId) { return false; diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentHookBase.java b/server/src/main/java/com/cloud/consoleproxy/AgentHookBase.java index fdbacb5c8c2..4ba0d7fe89a 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentHookBase.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentHookBase.java @@ -213,7 +213,7 @@ public abstract class AgentHookBase implements AgentHook { byte[] ksBits = null; String consoleProxyUrlDomain = _configDao.getValue(Config.ConsoleProxyUrlDomain.key()); - String consoleProxySslEnabled = _configDao.getValue("consoleproxy.sslEnabled"); + String consoleProxySslEnabled = _configDao.getValue(ConsoleProxyManager.ConsoleProxySslEnabled.key()); if (!StringUtils.isEmpty(consoleProxyUrlDomain) && !StringUtils.isEmpty(consoleProxySslEnabled) && consoleProxySslEnabled.equalsIgnoreCase("true")) { ksBits = _ksMgr.getKeystoreBits(ConsoleProxyManager.CERTIFICATE_NAME, 
ConsoleProxyManager.CERTIFICATE_NAME, storePassword); diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManager.java index 6280495fb1a..88f3b30f96f 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManager.java @@ -16,11 +16,20 @@ // under the License. package com.cloud.consoleproxy; -import com.cloud.utils.component.Manager; -import com.cloud.vm.ConsoleProxyVO; +import java.util.Map; import org.apache.cloudstack.framework.config.ConfigKey; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.utils.component.Manager; +import com.cloud.vm.ConsoleProxyVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + public interface ConsoleProxyManager extends Manager, ConsoleProxyService { int DEFAULT_PROXY_CAPACITY = 50; @@ -36,6 +45,9 @@ public interface ConsoleProxyManager extends Manager, ConsoleProxyService { String ALERT_SUBJECT = "proxy-alert"; String CERTIFICATE_NAME = "CPVMCertificate"; + ConfigKey ConsoleProxySslEnabled = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Boolean.class, "consoleproxy.sslEnabled", "false", + "Enable SSL for console proxy", false); + ConfigKey NoVncConsoleDefault = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Boolean.class, "novnc.console.default", "true", "If true, noVNC console will be default console for virtual machines", true); @@ -50,6 +62,10 @@ public interface ConsoleProxyManager extends Manager, ConsoleProxyService { ConsoleProxyVO startProxy(long proxyVmId, boolean ignoreRestartSetting); + void startProxyForHA(VirtualMachine vm, Map params, DeploymentPlanner planner) + throws 
InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, + OperationTimedoutException; + boolean stopProxy(long proxyVmId); boolean rebootProxy(long proxyVmId); diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 028ecd31b63..53f76f8ad42 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -74,6 +74,9 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; @@ -491,6 +494,14 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy return null; } + @Override + @ActionEvent(eventType = EventTypes.EVENT_PROXY_START, eventDescription = "restarting console proxy VM for HA", async = true) + public void startProxyForHA(VirtualMachine vm, Map params, + DeploymentPlanner planner) throws InsufficientCapacityException, ResourceUnavailableException, + ConcurrentOperationException, OperationTimedoutException { + virtualMachineManager.advanceStart(vm.getUuid(), params, planner); + } + public ConsoleProxyVO assignProxyFromRunningPool(long dataCenterId) { if (logger.isDebugEnabled()) { @@ -1118,7 +1129,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy Map configs = configurationDao.getConfiguration("management-server", params); - String value = configs.get("consoleproxy.sslEnabled"); + String value = configs.get(ConsoleProxySslEnabled.key()); if (value != null 
&& value.equalsIgnoreCase("true")) { sslEnabled = true; } @@ -1607,7 +1618,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] { NoVncConsoleDefault, NoVncConsoleSourceIpCheckEnabled }; + return new ConfigKey[] { ConsoleProxySslEnabled, NoVncConsoleDefault, NoVncConsoleSourceIpCheckEnabled }; } protected ConsoleProxyStatus parseJsonToConsoleProxyStatus(String json) throws JsonParseException { diff --git a/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java index bb2b426bf82..29a7497fc17 100644 --- a/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/StaticConsoleProxyManager.java @@ -72,7 +72,7 @@ public class StaticConsoleProxyManager extends AgentBasedConsoleProxyManager imp _ip = "127.0.0.1"; } - String value = (String)params.get("consoleproxy.sslEnabled"); + String value = (String)params.get(ConsoleProxySslEnabled.key()); if (value != null && value.equalsIgnoreCase("true")) { _sslEnabled = true; } diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index d97fcef7453..19760e6d025 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -19,6 +19,7 @@ package com.cloud.deploy; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -35,6 +36,8 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.cpu.CPU; +import com.cloud.vm.UserVmManager; import 
org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; @@ -270,6 +273,8 @@ StateListener, Configurable { _affinityProcessors = affinityProcessors; } + private static final List clusterArchTypes = Arrays.asList(CPU.CPUArch.amd64, CPU.CPUArch.arm64); + protected void avoidOtherClustersForDeploymentIfMigrationDisabled(VirtualMachine vm, Host lastHost, ExcludeList avoids) { if (lastHost == null || lastHost.getClusterId() == null || ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.valueIn(vm.getDataCenterId())) { @@ -282,7 +287,7 @@ StateListener, Configurable { boolean storageMigrationNeededDuringClusterMigration = false; for (Volume volume : volumes) { StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId()); - if (List.of(ScopeType.HOST, ScopeType.CLUSTER).contains(pool.getScope())) { + if (pool != null && List.of(ScopeType.HOST, ScopeType.CLUSTER).contains(pool.getScope())) { storageMigrationNeededDuringClusterMigration = true; break; } @@ -328,6 +333,7 @@ StateListener, Configurable { logger.debug("ROOT volume [{}] {} to deploy VM [{}].", () -> getRootVolumeUuid(_volsDao.findByInstance(vm.getId())), () -> plan.getPoolId() != null ? 
"is ready" : "is not ready", vm::getUuid); avoidDisabledResources(vmProfile, dc, avoids); + avoidDifferentArchResources(vmProfile, dc, avoids); String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); String uefiFlag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.UefiFlag); @@ -454,14 +460,29 @@ StateListener, Configurable { return dest; } + private void avoidDifferentArchResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) { + VirtualMachineTemplate template = vmProfile.getTemplate(); + for (CPU.CPUArch arch : clusterArchTypes) { + if (arch.equals(template.getArch())) { + continue; + } + List avoidClusters = _clusterDao.listClustersByArchAndZoneId(dc.getId(), arch); + if (CollectionUtils.isNotEmpty(avoidClusters)) { + logger.debug("Excluding {} clusters as they are {} arch, conflicting with the requested arch {}", + avoidClusters.size(), arch.getType(), template.getArch().getType()); + List clusterIds = avoidClusters.stream().map(x -> x.getId()).collect(Collectors.toList()); + avoids.addClusterList(clusterIds); + } + } + } + private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, DeploymentPlanner planner, VirtualMachine vm, DataCenter dc, ServiceOffering offering, int cpuRequested, long ramRequested, boolean volumesRequireEncryption) throws InsufficientServerCapacityException { HostVO host = _hostDao.findById(vm.getLastHostId()); - _hostDao.loadHostTags(host); - _hostDao.loadDetails(host); - if (canUseLastHost(host, avoids, plan, vm, offering, volumesRequireEncryption)) { + _hostDao.loadHostTags(host); + _hostDao.loadDetails(host); if (host.getStatus() != Status.Up) { logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host is not in UP state or is not enabled. 
Host current status [{}] and resource status [{}].", vm.getUuid(), host.getUuid(), host.getState().name(), host.getResourceState()); @@ -497,7 +518,7 @@ StateListener, Configurable { Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), displayStorage); logger.debug("Returning Deployment Destination: {}.", dest); return dest; } @@ -518,7 +539,7 @@ StateListener, Configurable { logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host.getUuid(), vm.getUuid()); return null; } - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); suitableHosts.add(host); Pair> potentialResources = findPotentialDeploymentResources( suitableHosts, suitableVolumeStoragePools, avoids, @@ -611,7 +632,7 @@ StateListener, Configurable { boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), displayStorage); logger.debug("Returning Deployment Destination: {}.", dest); return dest; @@ -626,7 +647,7 @@ StateListener, Configurable { List readyAndReusedVolumes = result.second(); if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); suitableHosts.add(host); Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), @@ -778,7 +799,7 @@ StateListener, Configurable { VirtualMachineTemplate template = vmProfile.getTemplate(); if 
(offering.getHostTag() != null || template.getTemplateTag() != null) { _hostDao.loadHostTags(host); - if (!host.checkHostServiceOfferingAndTemplateTags(offering, template)) { + if (!host.checkHostServiceOfferingAndTemplateTags(offering, template, UserVmManager.getStrictHostTags())) { logger.debug("Service Offering host tag or template tag does not match the last host of this VM"); return false; } @@ -868,9 +889,9 @@ StateListener, Configurable { long vmAccountId = vm.getAccountId(); long vmDomainId = vm.getDomainId(); - List allPodsFromDedicatedID = new ArrayList(); - List allClustersFromDedicatedID = new ArrayList(); - List allHostsFromDedicatedID = new ArrayList(); + List allPodsFromDedicatedID = new ArrayList<>(); + List allClustersFromDedicatedID = new ArrayList<>(); + List allHostsFromDedicatedID = new ArrayList<>(); List domainGroupMappings = _affinityGroupDomainMapDao.listByDomain(vmDomainId); @@ -1000,7 +1021,7 @@ StateListener, Configurable { final PlannerResourceUsage hostResourceTypeFinal = hostResourceType; // reserve the host for required resourceType // let us lock the reservation entry before updating. 
- return Transaction.execute(new TransactionCallback() { + return Transaction.execute(new TransactionCallback<>() { @Override public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); @@ -1092,7 +1113,7 @@ StateListener, Configurable { final long id = reservationEntry.getId(); - return Transaction.execute(new TransactionCallback() { + return Transaction.execute(new TransactionCallback<>() { @Override public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); @@ -1308,7 +1329,7 @@ StateListener, Configurable { List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); // if found suitable hosts in this cluster, find suitable storage // pools for each volume of the VM - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); return dest; @@ -1454,7 +1475,7 @@ StateListener, Configurable { } } - return new Pair(requiresShared, requiresLocal); + return new Pair<>(requiresShared, requiresLocal); } protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools, @@ -1466,10 +1487,10 @@ StateListener, Configurable { boolean hostAffinityCheck = false; if (readyAndReusedVolumes == null) { - readyAndReusedVolumes = new ArrayList(); + readyAndReusedVolumes = new ArrayList<>(); } - Map storage = new HashMap(); - TreeSet volumesOrderBySizeDesc = new TreeSet(new Comparator() { + Map storage = new HashMap<>(); + TreeSet volumesOrderBySizeDesc = new TreeSet<>(new Comparator<>() { @Override public int compare(Volume v1, Volume v2) { if (v1.getSize() < v2.getSize()) @@ -1482,7 +1503,7 @@ StateListener, Configurable { boolean 
multipleVolume = volumesOrderBySizeDesc.size() > 1; boolean deployAsIs = isDeployAsIs(vm); for (Host potentialHost : suitableHosts) { - Map> volumeAllocationMap = new HashMap>(); + Map> volumeAllocationMap = new HashMap<>(); if (deployAsIs) { storage = new HashMap<>(); // Find the common suitable pools @@ -1554,7 +1575,7 @@ StateListener, Configurable { if (volumeAllocationMap.containsKey(potentialSPool)) requestVolumes = volumeAllocationMap.get(potentialSPool); else - requestVolumes = new ArrayList(); + requestVolumes = new ArrayList<>(); requestVolumes.add(vol); List> volumeDiskProfilePair = getVolumeDiskProfilePairs(requestVolumes); if (potentialHost.getHypervisorType() == HypervisorType.VMware) { @@ -1604,7 +1625,7 @@ StateListener, Configurable { logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM"); volumeAllocationMap.clear(); - return new Pair>(potentialHost, storage); + return new Pair<>(potentialHost, storage); } else { logger.debug("Adding host [{}] to the avoid set because: can access Pool [{}], has enough space [{}], affinity check [{}], fits planner [{}] usage [{}].", potentialHost.getUuid(), hostCanAccessPool, haveEnoughSpace, hostAffinityCheck, resourceUsageRequired.getClass().getSimpleName(), hostFitsPlannerUsage); @@ -1652,25 +1673,33 @@ StateListener, Configurable { } logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? 
" can" : " cannot") + " access pool: " + pool.getId()); + if (!hostCanAccessSPool) { + if (_storageMgr.canHostPrepareStoragePoolAccess(host, pool)) { + logger.debug("Host: " + host.getId() + " can prepare access to pool: " + pool.getId()); + hostCanAccessSPool = true; + } else { + logger.debug("Host: " + host.getId() + " cannot prepare access to pool: " + pool.getId()); + } + } + return hostCanAccessSPool; } protected List findSuitableHosts(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - List suitableHosts = new ArrayList(); + List suitableHosts = new ArrayList<>(); for (HostAllocator allocator : _hostAllocators) { suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { break; } } - if (suitableHosts.isEmpty()) { - logger.debug("No suitable hosts found"); + if (CollectionUtils.isEmpty(suitableHosts)) { + logger.debug("No suitable hosts found."); + } else { + reorderHostsByPriority(plan.getHostPriorities(), suitableHosts); } - // re-order hosts by priority - reorderHostsByPriority(plan.getHostPriorities(), suitableHosts); - return suitableHosts; } @@ -1699,8 +1728,8 @@ StateListener, Configurable { protected Pair>, List> findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); - Map> suitableVolumeStoragePools = new HashMap>(); - List readyAndReusedVolumes = new ArrayList(); + Map> suitableVolumeStoragePools = new HashMap<>(); + List readyAndReusedVolumes = new ArrayList<>(); // There should be at least the ROOT volume of the VM in usable state if (volumesTobeCreated.isEmpty()) { @@ -1785,7 +1814,7 @@ StateListener, Configurable { } } - HashSet toRemove = new HashSet(); + HashSet toRemove = new HashSet<>(); for (List lsp : 
suitableVolumeStoragePools.values()) { for (StoragePool sp : lsp) { toRemove.add(sp.getId()); @@ -1801,7 +1830,7 @@ StateListener, Configurable { logger.debug("No suitable pools found"); } - return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); + return new Pair<>(suitableVolumeStoragePools, readyAndReusedVolumes); } private boolean tryToFindPotentialPoolsToAlocateVolume(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, @@ -1825,7 +1854,7 @@ StateListener, Configurable { Map> suitableVolumeStoragePools, List readyAndReusedVolumes, VolumeVO toBeCreated) { logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. Checking if this pool can be reused.", toBeCreated.getUuid(), vmProfile.getUuid(), toBeCreated.getPoolId()); - List suitablePools = new ArrayList(); + List suitablePools = new ArrayList<>(); StoragePool pool = null; if (toBeCreated.getPoolId() != null) { pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); @@ -1952,7 +1981,7 @@ StateListener, Configurable { final VirtualMachine vm = vmProfile.getVirtualMachine(); final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); - return Transaction.execute(new TransactionCallback() { + return Transaction.execute(new TransactionCallback<>() { @Override public String doInTransaction(TransactionStatus status) { boolean saveReservation = true; @@ -1978,7 +2007,7 @@ StateListener, Configurable { if (planner != null) { vmReservation.setDeploymentPlanner(planner.getName()); } - Map volumeReservationMap = new HashMap(); + Map volumeReservationMap = new HashMap<>(); if (vm.getHypervisorType() != HypervisorType.BareMetal) { for (Volume vo : plannedDestination.getStorageForDisks().keySet()) { diff --git a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java index 22b9a33f65b..46e6c369c33 100644 --- 
a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java @@ -138,7 +138,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla return null; } - List clusterList = new ArrayList(); + List clusterList = new ArrayList<>(); if (plan.getClusterId() != null) { Long clusterIdSpecified = plan.getClusterId(); logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified); @@ -209,7 +209,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla } private void reorderClustersBasedOnImplicitTags(List clusterList, int requiredCpu, long requiredRam) { - final HashMap UniqueTagsInClusterMap = new HashMap(); + final HashMap UniqueTagsInClusterMap = new HashMap<>(); Long uniqueTags; for (Long clusterId : clusterList) { uniqueTags = (long) 0; @@ -220,7 +220,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla } UniqueTagsInClusterMap.put(clusterId, uniqueTags); } - Collections.sort(clusterList, new Comparator() { + Collections.sort(clusterList, new Comparator<>() { @Override public int compare(Long o1, Long o2) { Long t1 = UniqueTagsInClusterMap.get(o1); @@ -249,7 +249,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla int requiredCpu = offering.getCpu() * offering.getSpeed(); long requiredRam = offering.getRamSize() * 1024L * 1024L; //list pods under this zone by cpu and ram capacity - List prioritizedPodIds = new ArrayList(); + List prioritizedPodIds; Pair, Map> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam); List podsWithCapacity = podCapacityInfo.first(); @@ -277,7 +277,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla return null; } - List clusterList = new ArrayList(); + List clusterList = new ArrayList<>(); //loop over pods for (Long podId : prioritizedPodIds) { logger.debug("Checking 
resources under Pod: " + podId); @@ -298,7 +298,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla private Map getCapacityThresholdMap() { // Lets build this real time so that the admin won't have to restart MS // if anyone changes these values - Map disableThresholdMap = new HashMap(); + Map disableThresholdMap = new HashMap<>(); String cpuDisableThresholdString = ClusterCPUCapacityDisableThreshold.value().toString(); float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F); @@ -312,7 +312,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla } private List getCapacitiesForCheckingThreshold() { - List capacityList = new ArrayList(); + List capacityList = new ArrayList<>(); capacityList.add(Capacity.CAPACITY_TYPE_CPU); capacityList.add(Capacity.CAPACITY_TYPE_MEMORY); return capacityList; @@ -339,7 +339,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla } List capacityList = getCapacitiesForCheckingThreshold(); - List clustersCrossingThreshold = new ArrayList(); + List clustersCrossingThreshold = new ArrayList<>(); ServiceOffering offering = vmProfile.getServiceOffering(); int cpu_requested = offering.getCpu() * offering.getSpeed(); @@ -523,7 +523,7 @@ public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPla matchingClusters.addAll(hostDao.findClustersThatMatchHostTagRule(hostTagOnOffering)); if (matchingClusters.isEmpty()) { - logger.error(String.format("No suitable host found for the following compute offering tags [%s].", hostTagOnOffering)); + logger.error("No suitable host found for the following compute offering tags [{}].", hostTagOnOffering); throw new CloudRuntimeException("No suitable host found."); } diff --git a/server/src/main/java/com/cloud/event/ActionEventInterceptor.java b/server/src/main/java/com/cloud/event/ActionEventInterceptor.java index 30ff61b3e7f..9554ae34cb6 100644 --- 
a/server/src/main/java/com/cloud/event/ActionEventInterceptor.java +++ b/server/src/main/java/com/cloud/event/ActionEventInterceptor.java @@ -70,7 +70,7 @@ public class ActionEventInterceptor implements ComponentMethodInterceptor, Metho if (async) { CallContext ctx = CallContext.current(); - String eventDescription = getEventDescription(actionEvent, ctx); + String eventDescription = getEventDescription(actionEvent, ctx, true); Long eventResourceId = getEventResourceId(actionEvent, ctx); String eventResourceType = getEventResourceType(actionEvent, ctx); String eventType = getEventType(actionEvent, ctx); @@ -88,13 +88,13 @@ public class ActionEventInterceptor implements ComponentMethodInterceptor, Metho for (ActionEvent actionEvent : getActionEvents(method)) { CallContext ctx = CallContext.current(); long userId = ctx.getCallingUserId(); - long accountId = ctx.getProject() != null ? ctx.getProject().getProjectAccountId() : ctx.getCallingAccountId(); //This should be the entity owner id rather than the Calling User Account Id. 
long startEventId = ctx.getStartEventId(); String eventDescription = getEventDescription(actionEvent, ctx); Long eventResourceId = getEventResourceId(actionEvent, ctx); String eventResourceType = getEventResourceType(actionEvent, ctx); String eventType = getEventType(actionEvent, ctx); boolean isEventDisplayEnabled = ctx.isEventDisplayEnabled(); + long accountId = ActionEventUtils.getOwnerAccountId(ctx, eventType, ctx.getCallingAccountId()); if (eventType.equals("")) return; @@ -118,13 +118,13 @@ public class ActionEventInterceptor implements ComponentMethodInterceptor, Metho for (ActionEvent actionEvent : getActionEvents(method)) { CallContext ctx = CallContext.current(); long userId = ctx.getCallingUserId(); - long accountId = ctx.getCallingAccountId(); long startEventId = ctx.getStartEventId(); String eventDescription = getEventDescription(actionEvent, ctx); Long eventResourceId = getEventResourceId(actionEvent, ctx); String eventResourceType = getEventResourceType(actionEvent, ctx); String eventType = getEventType(actionEvent, ctx); boolean isEventDisplayEnabled = ctx.isEventDisplayEnabled(); + long accountId = ActionEventUtils.getOwnerAccountId(ctx, eventType, ctx.getCallingAccountId()); if (eventType.equals("")) return; @@ -183,19 +183,24 @@ public class ActionEventInterceptor implements ComponentMethodInterceptor, Metho return type == null ? actionEvent.eventType() : type; } - protected String getEventDescription(ActionEvent actionEvent, CallContext ctx) { + protected String getEventDescription(ActionEvent actionEvent, CallContext ctx, boolean capitalizeFirstLetter) { String eventDescription = ctx.getEventDescription(); if (eventDescription == null) { eventDescription = actionEvent.eventDescription(); } - if (ctx.getEventDetails() != null) { eventDescription += ". 
" + ctx.getEventDetails(); } - + if (capitalizeFirstLetter && StringUtils.isNotBlank(eventDescription)) { + eventDescription = eventDescription.substring(0, 1).toUpperCase() + eventDescription.substring(1); + } return eventDescription; } + protected String getEventDescription(ActionEvent actionEvent, CallContext ctx) { + return getEventDescription(actionEvent, ctx, false); + } + protected Long getEventResourceId(ActionEvent actionEvent, CallContext ctx) { Long resourceId = ctx.getEventResourceId(); if (resourceId != null) { diff --git a/server/src/main/java/com/cloud/event/ActionEventUtils.java b/server/src/main/java/com/cloud/event/ActionEventUtils.java index 8ea93684877..ae77446a856 100644 --- a/server/src/main/java/com/cloud/event/ActionEventUtils.java +++ b/server/src/main/java/com/cloud/event/ActionEventUtils.java @@ -22,6 +22,7 @@ import java.lang.reflect.Method; import java.text.SimpleDateFormat; import java.util.Date; import java.util.HashMap; +import java.util.List; import java.util.Map; import javax.annotation.PostConstruct; @@ -32,12 +33,11 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.events.EventBus; -import org.apache.cloudstack.framework.events.EventBusException; +import org.apache.cloudstack.framework.events.EventDistributor; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import com.cloud.configuration.Config; @@ -63,7 +63,7 @@ public class ActionEventUtils { private static AccountDao s_accountDao; private static ProjectDao s_projectDao; protected static UserDao s_userDao; - protected static EventBus 
s_eventBus = null; + private static EventDistributor eventDistributor; protected static EntityManager s_entityMgr; protected static ConfigurationDao s_configDao; @@ -101,8 +101,10 @@ public class ActionEventUtils { public static Long onActionEvent(Long userId, Long accountId, Long domainId, String type, String description, Long resourceId, String resourceType) { Ternary resourceDetails = getResourceDetails(resourceId, resourceType, type); - publishOnEventBus(userId, accountId, EventCategory.ACTION_EVENT.getName(), type, com.cloud.event.Event.State.Completed, description, resourceDetails.second(), resourceDetails.third()); - Event event = persistActionEvent(userId, accountId, domainId, null, type, Event.State.Completed, true, description, resourceDetails.first(), resourceDetails.third(), null); + Event event = persistActionEvent(userId, accountId, domainId, null, type, Event.State.Completed, + true, description, resourceDetails.first(), resourceDetails.third(), null); + publishOnEventBus(event, userId, accountId, domainId, EventCategory.ACTION_EVENT.getName(), type, + com.cloud.event.Event.State.Completed, description, resourceDetails.second(), resourceDetails.third()); return event.getId(); } @@ -111,8 +113,12 @@ public class ActionEventUtils { */ public static Long onScheduledActionEvent(Long userId, Long accountId, String type, String description, Long resourceId, String resourceType, boolean eventDisplayEnabled, long startEventId) { Ternary resourceDetails = getResourceDetails(resourceId, resourceType, type); - publishOnEventBus(userId, accountId, EventCategory.ACTION_EVENT.getName(), type, com.cloud.event.Event.State.Scheduled, description, resourceDetails.second(), resourceDetails.third()); - Event event = persistActionEvent(userId, accountId, null, null, type, Event.State.Scheduled, eventDisplayEnabled, description, resourceDetails.first(), resourceDetails.third(), startEventId); + CallContext ctx = CallContext.current(); + accountId = getOwnerAccountId(ctx, 
type, accountId); + Event event = persistActionEvent(userId, accountId, null, null, type, Event.State.Scheduled, + eventDisplayEnabled, description, resourceDetails.first(), resourceDetails.third(), startEventId); + publishOnEventBus(event, userId, accountId, EventCategory.ACTION_EVENT.getName(), type, + com.cloud.event.Event.State.Scheduled, description, resourceDetails.second(), resourceDetails.third()); return event.getId(); } @@ -124,7 +130,7 @@ public class ActionEventUtils { public static void onStartedActionEventFromContext(String eventType, String eventDescription, Long resourceId, String resourceType, boolean eventDisplayEnabled) { CallContext ctx = CallContext.current(); long userId = ctx.getCallingUserId(); - long accountId = ctx.getProject() != null ? ctx.getProject().getProjectAccountId() : ctx.getCallingAccountId(); //This should be the entity owner id rather than the Calling User Account Id. + long accountId = getOwnerAccountId(ctx, eventType, ctx.getCallingAccountId()); long startEventId = ctx.getStartEventId(); if (!eventType.equals("")) @@ -136,8 +142,10 @@ public class ActionEventUtils { */ public static Long onStartedActionEvent(Long userId, Long accountId, String type, String description, Long resourceId, String resourceType, boolean eventDisplayEnabled, long startEventId) { Ternary resourceDetails = getResourceDetails(resourceId, resourceType, type); - publishOnEventBus(userId, accountId, EventCategory.ACTION_EVENT.getName(), type, com.cloud.event.Event.State.Started, description, resourceDetails.second(), resourceDetails.third()); - Event event = persistActionEvent(userId, accountId, null, null, type, Event.State.Started, eventDisplayEnabled, description, resourceDetails.first(), resourceDetails.third(), startEventId); + Event event = persistActionEvent(userId, accountId, null, null, type, Event.State.Started, + eventDisplayEnabled, description, resourceDetails.first(), resourceDetails.third(), startEventId); + publishOnEventBus(event, 
userId, accountId, EventCategory.ACTION_EVENT.getName(), type, + com.cloud.event.Event.State.Started, description, resourceDetails.second(), resourceDetails.third()); return event.getId(); } @@ -148,16 +156,20 @@ public class ActionEventUtils { public static Long onCompletedActionEvent(Long userId, Long accountId, String level, String type, boolean eventDisplayEnabled, String description, Long resourceId, String resourceType, long startEventId) { Ternary resourceDetails = getResourceDetails(resourceId, resourceType, type); - publishOnEventBus(userId, accountId, EventCategory.ACTION_EVENT.getName(), type, com.cloud.event.Event.State.Completed, description, resourceDetails.second(), resourceDetails.third()); - Event event = persistActionEvent(userId, accountId, null, level, type, Event.State.Completed, eventDisplayEnabled, description, resourceDetails.first(), resourceDetails.third(), startEventId); + Event event = persistActionEvent(userId, accountId, null, level, type, Event.State.Completed, + eventDisplayEnabled, description, resourceDetails.first(), resourceDetails.third(), startEventId); + publishOnEventBus(event, userId, accountId, EventCategory.ACTION_EVENT.getName(), type, + com.cloud.event.Event.State.Completed, description, resourceDetails.second(), resourceDetails.third()); return event.getId(); } public static Long onCreatedActionEvent(Long userId, Long accountId, String level, String type, boolean eventDisplayEnabled, String description, Long resourceId, String resourceType) { Ternary resourceDetails = getResourceDetails(resourceId, resourceType, type); - publishOnEventBus(userId, accountId, EventCategory.ACTION_EVENT.getName(), type, com.cloud.event.Event.State.Created, description, resourceDetails.second(), resourceDetails.third()); - Event event = persistActionEvent(userId, accountId, null, level, type, Event.State.Created, eventDisplayEnabled, description, resourceDetails.first(), resourceDetails.third(), null); + Event event = 
persistActionEvent(userId, accountId, null, level, type, Event.State.Created, + eventDisplayEnabled, description, resourceDetails.first(), resourceDetails.third(), null); + publishOnEventBus(event, userId, accountId, EventCategory.ACTION_EVENT.getName(), type, + com.cloud.event.Event.State.Created, description, resourceDetails.second(), resourceDetails.third()); return event.getId(); } @@ -193,20 +205,25 @@ public class ActionEventUtils { return event; } - private static void publishOnEventBus(long userId, long accountId, String eventCategory, String eventType, Event.State state, String description, String resourceUuid, String resourceType) { + private static void publishOnEventBus(Event eventRecord, long userId, long accountId, Long domainId, + String eventCategory, String eventType, Event.State state, String description, String resourceUuid, + String resourceType) { String configKey = Config.PublishActionEvent.key(); String value = s_configDao.getValue(configKey); boolean configValue = Boolean.parseBoolean(value); if(!configValue) return; + try { - s_eventBus = ComponentContext.getComponent(EventBus.class); + eventDistributor = ComponentContext.getComponent(EventDistributor.class); } catch (NoSuchBeanDefinitionException nbe) { return; // no provider is configured to provide events bus, so just return } org.apache.cloudstack.framework.events.Event event = - new org.apache.cloudstack.framework.events.Event(ManagementService.Name, eventCategory, eventType, resourceType, resourceUuid); + new org.apache.cloudstack.framework.events.Event(ManagementService.Name, eventCategory, eventType, resourceType, resourceUuid); + event.setEventId(eventRecord.getId()); + event.setEventUuid(eventRecord.getUuid()); Map eventDescription = new HashMap(); Project project = s_projectDao.findByProjectAccountId(accountId); @@ -219,6 +236,9 @@ public class ActionEventUtils { return; if (project != null) eventDescription.put("project", project.getUuid()); + 
event.setResourceAccountId(accountId); + event.setResourceAccountUuid(account.getUuid()); + event.setResourceDomainId(domainId == null ? account.getDomainId() : domainId); eventDescription.put("user", user.getUuid()); eventDescription.put("account", account.getUuid()); eventDescription.put("event", eventType); @@ -234,11 +254,13 @@ public class ActionEventUtils { event.setDescription(eventDescription); - try { - s_eventBus.publish(event); - } catch (EventBusException e) { - LOGGER.warn("Failed to publish action event on the event bus."); - } + eventDistributor.publish(event); + } + + private static void publishOnEventBus(Event event, long userId, long accountId, String eventCategory, + String eventType, Event.State state, String description, String resourceUuid, String resourceType) { + publishOnEventBus(event, userId, accountId, null, eventCategory, eventType, state, description, + resourceUuid, resourceType); } private static Ternary getResourceDetailsUsingEntityClassAndContext(Class entityClass, ApiCommandResourceType resourceType) { @@ -394,7 +416,11 @@ public class ActionEventUtils { LOGGER.trace("Caught exception while populating first class entities for event bus, moving on"); } } - } + public static long getOwnerAccountId(CallContext ctx, String eventType, long callingAccountId) { + List mainProjectEvents = List.of(EventTypes.EVENT_PROJECT_CREATE, EventTypes.EVENT_PROJECT_UPDATE, EventTypes.EVENT_PROJECT_DELETE); + long accountId = ctx.getProject() != null && !mainProjectEvents.stream().anyMatch(eventType::equalsIgnoreCase) ? ctx.getProject().getProjectAccountId() : callingAccountId; //This should be the entity owner id rather than the Calling User Account Id. 
+ return accountId; + } } diff --git a/server/src/main/java/com/cloud/event/AlertGenerator.java b/server/src/main/java/com/cloud/event/AlertGenerator.java index 27698f27862..f1b23e87308 100644 --- a/server/src/main/java/com/cloud/event/AlertGenerator.java +++ b/server/src/main/java/com/cloud/event/AlertGenerator.java @@ -25,15 +25,13 @@ import java.util.Map; import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.logging.log4j.Logger; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.events.EventDistributor; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.events.EventBus; -import org.apache.cloudstack.framework.events.EventBusException; - import com.cloud.configuration.Config; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; @@ -48,8 +46,8 @@ public class AlertGenerator { protected static Logger LOGGER = LogManager.getLogger(AlertGenerator.class); private static DataCenterDao s_dcDao; private static HostPodDao s_podDao; - protected static EventBus s_eventBus = null; protected static ConfigurationDao s_configDao; + protected static EventDistributor eventDistributor; @Inject DataCenterDao dcDao; @@ -76,9 +74,9 @@ public class AlertGenerator { if(!configValue) return; try { - s_eventBus = ComponentContext.getComponent(EventBus.class); + eventDistributor = ComponentContext.getComponent(EventDistributor.class); } catch (NoSuchBeanDefinitionException nbe) { - return; // no provider is configured to provide events bus, so just return + return; // no provider is configured to provide events distributor, so just return } org.apache.cloudstack.framework.events.Event event = @@ -107,10 +105,6 @@ public class 
AlertGenerator { event.setDescription(eventDescription); - try { - s_eventBus.publish(event); - } catch (EventBusException e) { - LOGGER.warn("Failed to publish alert on the event bus."); - } + eventDistributor.publish(event); } } diff --git a/server/src/main/java/com/cloud/event/dao/EventJoinDaoImpl.java b/server/src/main/java/com/cloud/event/dao/EventJoinDaoImpl.java index f51df27a741..b4316051e43 100644 --- a/server/src/main/java/com/cloud/event/dao/EventJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/event/dao/EventJoinDaoImpl.java @@ -131,6 +131,10 @@ public class EventJoinDaoImpl extends GenericDaoBase implemen @Override public List searchByIds(Long... ids) { + // return empty collection if there are no ids. + if (ids.length == 0) { + return List.of(); + } SearchCriteria sc = vrSearch.create(); sc.setParameters("idIN", ids); return searchIncludingRemoved(sc, null, null, false); diff --git a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java index b815f21e206..d8fc99a0934 100644 --- a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -28,6 +28,8 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.ApiCommandResourceType; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; @@ -64,13 +66,14 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.VpcVirtualNetworkApplianceService; import com.cloud.resource.ResourceManager; import 
com.cloud.server.ManagementServer; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.VolumeVO; -import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.VolumeDao; @@ -79,6 +82,7 @@ import com.cloud.user.AccountManager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.UserVmManager; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; @@ -142,6 +146,10 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur VolumeDao volumeDao; @Inject DataStoreProviderManager dataStoreProviderMgr; + @Inject + VpcVirtualNetworkApplianceService routerService; + @Inject + UserVmManager userVmManager; long _serverId; @@ -435,6 +443,36 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } + private void startVm(VirtualMachine vm, Map params, + DeploymentPlanner planner) throws InsufficientCapacityException, ResourceUnavailableException, + ConcurrentOperationException, OperationTimedoutException { + CallContext ctx = CallContext.register(CallContext.current(), ApiCommandResourceType.VirtualMachine); + ctx.setEventResourceId(vm.getId()); + try { + switch (vm.getType()) { + case DomainRouter: + ctx.setEventResourceType(ApiCommandResourceType.DomainRouter); + routerService.startRouterForHA(vm, params, planner); + break; + case ConsoleProxy: + ctx.setEventResourceType(ApiCommandResourceType.ConsoleProxy); + consoleProxyManager.startProxyForHA(vm, params, planner); + break; + case SecondaryStorageVm: + ctx.setEventResourceType(ApiCommandResourceType.SystemVm); + 
secondaryStorageVmManager.startSecStorageVmForHA(vm, params, planner); + break; + case User: + userVmManager.startVirtualMachineForHA(vm, params, planner); + break; + default: + _itMgr.advanceStart(vm.getUuid(), params, planner); + } + } finally { + CallContext.unregister(); + } + } + protected Long restart(final HaWorkVO work) { logger.debug("RESTART with HAWORK"); List items = _haDao.listFutureHaWorkForVm(work.getInstanceId(), work.getId()); @@ -626,10 +664,10 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } } // First try starting the vm with its original planner, if it doesn't succeed send HAPlanner as its an emergency. - _itMgr.advanceStart(vm.getUuid(), params, null); - }catch (InsufficientCapacityException e){ + startVm(vm, params, null); + } catch (InsufficientCapacityException e){ logger.warn("Failed to deploy vm " + vmId + " with original planner, sending HAPlanner"); - _itMgr.advanceStart(vm.getUuid(), params, _haPlanners.get(0)); + startVm(vm, params, _haPlanners.get(0)); } VMInstanceVO started = _instanceDao.findById(vm.getId()); @@ -651,7 +689,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } catch (final ResourceUnavailableException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, "The resource is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } catch (ConcurrentOperationException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), 
"Unable to restart " + vm.getHostName() + " which was running on host " + @@ -659,7 +697,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur } catch (OperationTimedoutException e) { logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + - hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); + hostDesc, "The operation timed out while trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } vm = _itMgr.findById(vm.getId()); work.setUpdateTime(vm.getUpdated()); @@ -1092,4 +1130,9 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur StopRetryInterval, RestartRetryInterval, MigrateRetryInterval, InvestigateRetryInterval, HAWorkers, ForceHA, KvmHAFenceHostIfHeartbeatFailsOnStorage}; } + + @Override + public int expungeWorkItemsByVmList(List vmIds, Long batchSize) { + return _haDao.expungeByVmList(vmIds, batchSize); + } } diff --git a/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDao.java b/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDao.java index e8a3e17f805..f6539105d78 100644 --- a/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDao.java +++ b/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDao.java @@ -85,4 +85,5 @@ public interface HighAvailabilityDao extends GenericDao { List listPendingHaWorkForVm(long vmId); List listPendingMigrationsForVm(long vmId); + int expungeByVmList(List vmIds, Long batchSize); } diff --git a/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDaoImpl.java b/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDaoImpl.java index 357796a6a70..c722c6376c1 100644 --- 
a/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDaoImpl.java +++ b/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDaoImpl.java @@ -19,7 +19,7 @@ package com.cloud.ha.dao; import java.util.Date; import java.util.List; - +import org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; import com.cloud.ha.HaWorkVO; @@ -258,4 +258,16 @@ public class HighAvailabilityDaoImpl extends GenericDaoBase impl return update(vo, sc); } + + @Override + public int expungeByVmList(List vmIds, Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("vmIds", sb.entity().getInstanceId(), SearchCriteria.Op.IN); + SearchCriteria sc = sb.create(); + sc.setParameters("vmIds", vmIds.toArray()); + return batchExpunge(sc, batchSize); + } } diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index 6242289caea..a5b2a3b75a5 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -44,6 +44,7 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; @@ -420,7 +421,7 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis } @Override - public UnmanagedInstanceTO cloneHypervisorVMOutOfBand(String hostIp, String vmName, Map params) { + public Pair getHypervisorVMOutOfBandAndCloneIfRequired(String hostIp, String vmName, Map params) { logger.error("Unsupported operation: cannot clone external VM"); return null; } @@ -430,4 +431,16 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis 
logger.error("Unsupported operation: cannot remove external VM"); return false; } + + @Override + public String createVMTemplateOutOfBand(String hostIp, String vmName, Map params, DataStoreTO templateLocation, int threadsCountToExportOvf) { + logger.error("Unsupported operation: cannot create template file"); + return null; + } + + @Override + public boolean removeVMTemplateOutOfBand(DataStoreTO templateLocation, String templateDir) { + logger.error("Unsupported operation: cannot remove template file"); + return false; + } } diff --git a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java index ff588d06479..c27adc59fde 100644 --- a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java +++ b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java @@ -355,15 +355,14 @@ public class KVMGuru extends HypervisorGuruBase implements HypervisorGuru { vm.setPowerState(VirtualMachine.PowerState.PowerOff); _instanceDao.update(vm.getId(), vm); } - for ( Backup.VolumeInfo VMVolToRestore : vm.getBackupVolumeList()) { + for (Backup.VolumeInfo VMVolToRestore : vm.getBackupVolumeList()) { VolumeVO volume = _volumeDao.findByUuidIncludingRemoved(VMVolToRestore.getUuid()); volume.setState(Volume.State.Ready); _volumeDao.update(volume.getId(), volume); if (VMVolToRestore.getType() == Volume.Type.ROOT) { _volumeDao.update(volume.getId(), volume); _volumeDao.attachVolume(volume.getId(), vm.getId(), 0L); - } - else if ( VMVolToRestore.getType() == Volume.Type.DATADISK) { + } else if (VMVolToRestore.getType() == Volume.Type.DATADISK) { List vmVolumes = _volumeDao.findByInstance(vm.getId()); _volumeDao.update(volume.getId(), volume); _volumeDao.attachVolume(volume.getId(), vm.getId(), getNextAvailableDeviceId(vmVolumes)); diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index 
390ea155b3c..ffd482b711d 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -62,6 +62,7 @@ import java.net.URI; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.UUID; @@ -70,6 +71,10 @@ import static com.cloud.configuration.ConfigurationManagerImpl.ADD_HOST_ON_SERVI public abstract class LibvirtServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { private final int _waitTime = 5; /* wait for 5 minutes */ + + private final static HashSet COMPATIBLE_HOST_OSES = new HashSet<>(Arrays.asList("Rocky", "Rocky Linux", + "Red", "Red Hat Enterprise Linux", "Oracle", "Oracle Linux Server", "AlmaLinux")); + private String _kvmPrivateNic; private String _kvmPublicNic; private String _kvmGuestNic; @@ -468,7 +473,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements _hostDao.loadDetails(oneHost); String hostOsInCluster = oneHost.getDetail("Host.OS"); String hostOs = ssCmd.getHostDetails().get("Host.OS"); - if (!hostOsInCluster.equalsIgnoreCase(hostOs)) { + if (!isHostOsCompatibleWithOtherHost(hostOsInCluster, hostOs)) { String msg = String.format("host: %s with hostOS, \"%s\"into a cluster, in which there are \"%s\" hosts added", firstCmd.getPrivateIpAddress(), hostOs, hostOsInCluster); if (hostOs != null && hostOs.startsWith(hostOsInCluster)) { logger.warn(String.format("Adding %s. 
This may or may not be ok!", msg)); @@ -483,6 +488,17 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements return _resourceMgr.fillRoutingHostVO(host, ssCmd, getHypervisorType(), host.getDetails(), null); } + protected boolean isHostOsCompatibleWithOtherHost(String hostOsInCluster, String hostOs) { + if (hostOsInCluster.equalsIgnoreCase(hostOs)) { + return true; + } + if (COMPATIBLE_HOST_OSES.contains(hostOsInCluster) && COMPATIBLE_HOST_OSES.contains(hostOs)) { + logger.info(String.format("The host OS (%s) is compatible with the existing host OS (%s) in the cluster.", hostOs, hostOsInCluster)); + return true; + } + return false; + } + @Override public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map details, List hostTags) { // TODO Auto-generated method stub diff --git a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java index 6902068f8ef..e6be174abcd 100644 --- a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java @@ -35,6 +35,7 @@ import javax.inject.Inject; import com.cloud.network.dao.PublicIpQuarantineDao; import com.cloud.network.vo.PublicIpQuarantineVO; +import com.cloud.resourcelimit.CheckedReservation; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.annotation.AnnotationService; @@ -53,6 +54,7 @@ import org.apache.cloudstack.region.PortableIp; import org.apache.cloudstack.region.PortableIpDao; import org.apache.cloudstack.region.PortableIpVO; import org.apache.cloudstack.region.Region; +import org.apache.cloudstack.reservation.dao.ReservationDao; import org.apache.commons.collections.CollectionUtils; import com.cloud.agent.AgentManager; @@ -259,6 +261,8 @@ public class IpAddressManagerImpl extends 
ManagerBase implements IpAddressManage @Inject ResourceLimitService _resourceLimitMgr; + @Inject + ReservationDao reservationDao; @Inject NetworkOfferingServiceMapDao _ntwkOfferingSrvcDao; @Inject @@ -717,50 +721,59 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @Override @DB public boolean disassociatePublicIpAddress(long addrId, long userId, Account caller) { - boolean success = true; - IPAddressVO ipToBeDisassociated = _ipAddressDao.findById(addrId); - PublicIpQuarantine publicIpQuarantine = null; - // Cleanup all ip address resources - PF/LB/Static nat rules - if (!cleanupIpResources(addrId, userId, caller)) { - success = false; - logger.warn("Failed to release resources for ip address id=" + addrId); - } + try { + IPAddressVO ipToBeDisassociated = _ipAddressDao.acquireInLockTable(addrId); - IPAddressVO ip = markIpAsUnavailable(addrId); - if (ip == null) { - return true; - } + if (ipToBeDisassociated == null) { + logger.error(String.format("Unable to acquire lock on public IP %s.", addrId)); + throw new CloudRuntimeException("Unable to acquire lock on public IP."); + } - if (logger.isDebugEnabled()) { - logger.debug("Releasing ip id=" + addrId + "; sourceNat = " + ip.isSourceNat()); - } + PublicIpQuarantine publicIpQuarantine = null; + // Cleanup all ip address resources - PF/LB/Static nat rules + if (!cleanupIpResources(addrId, userId, caller)) { + success = false; + logger.warn("Failed to release resources for ip address id=" + addrId); + } - if (ip.getAssociatedWithNetworkId() != null) { - Network network = _networksDao.findById(ip.getAssociatedWithNetworkId()); - try { - if (!applyIpAssociations(network, rulesContinueOnErrFlag)) { - logger.warn("Unable to apply ip address associations for " + network); - success = false; + IPAddressVO ip = markIpAsUnavailable(addrId); + if (ip == null) { + return true; + } + + if (logger.isDebugEnabled()) { + logger.debug("Releasing ip id=" + addrId + "; sourceNat = " + 
ip.isSourceNat()); + } + + if (ip.getAssociatedWithNetworkId() != null) { + Network network = _networksDao.findById(ip.getAssociatedWithNetworkId()); + try { + if (!applyIpAssociations(network, rulesContinueOnErrFlag)) { + logger.warn("Unable to apply ip address associations for " + network); + success = false; + } + } catch (ResourceUnavailableException e) { + throw new CloudRuntimeException("We should never get to here because we used true when applyIpAssociations", e); } - } catch (ResourceUnavailableException e) { - throw new CloudRuntimeException("We should never get to here because we used true when applyIpAssociations", e); + } else if (ip.getState() == State.Releasing) { + publicIpQuarantine = addPublicIpAddressToQuarantine(ipToBeDisassociated, caller.getDomainId()); + _ipAddressDao.unassignIpAddress(ip.getId()); } - } else if (ip.getState() == State.Releasing) { - publicIpQuarantine = addPublicIpAddressToQuarantine(ipToBeDisassociated, caller.getDomainId()); - _ipAddressDao.unassignIpAddress(ip.getId()); - } - annotationDao.removeByEntityType(AnnotationService.EntityType.PUBLIC_IP_ADDRESS.name(), ip.getUuid()); + annotationDao.removeByEntityType(AnnotationService.EntityType.PUBLIC_IP_ADDRESS.name(), ip.getUuid()); - if (success) { - if (ip.isPortable()) { - releasePortableIpAddress(addrId); + if (success) { + if (ip.isPortable()) { + releasePortableIpAddress(addrId); + } + logger.debug("Released a public ip id=" + addrId); + } else if (publicIpQuarantine != null) { + removePublicIpAddressFromQuarantine(publicIpQuarantine.getId(), "Public IP address removed from quarantine as there was an error while disassociating it."); } - logger.debug("Released a public ip id=" + addrId); - } else if (publicIpQuarantine != null) { - removePublicIpAddressFromQuarantine(publicIpQuarantine.getId(), "Public IP address removed from quarantine as there was an error while disassociating it."); + } finally { + _ipAddressDao.releaseFromLockTable(addrId); } return success; @@ 
-1252,7 +1265,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage AcquirePodIpCmdResponse ret = new AcquirePodIpCmdResponse(); ret.setCidrAddress(pod_vo.getCidrAddress()); ret.setGateway(pod_vo.getGateway()); - ret.setInstanceId(vo.getInstanceId()); + ret.setNicId(vo.getNicId()); ret.setIpAddress(vo.getIpAddress()); ret.setMacAddress(vo.getMacAddress()); ret.setPodId(vo.getPodId()); @@ -1539,14 +1552,15 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage logger.debug("Associating ip " + ipToAssoc + " to network " + network); - IPAddressVO ip = _ipAddressDao.findById(ipId); - //update ip address with networkId - ip.setAssociatedWithNetworkId(networkId); - ip.setSourceNat(isSourceNat); - _ipAddressDao.update(ipId, ip); - boolean success = false; - try { + IPAddressVO ip = null; + try (CheckedReservation publicIpReservation = new CheckedReservation(owner, ResourceType.public_ip, 1l, reservationDao, _resourceLimitMgr)) { + ip = _ipAddressDao.findById(ipId); + //update ip address with networkId + ip.setAssociatedWithNetworkId(networkId); + ip.setSourceNat(isSourceNat); + _ipAddressDao.update(ipId, ip); + success = applyIpAssociations(network, false); if (success) { logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); @@ -1554,6 +1568,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); } return _ipAddressDao.findById(ipId); + } catch (Exception e) { + logger.error(String.format("Failed to associate ip address %s to network %s", ipToAssoc, network), e); + throw new CloudRuntimeException(String.format("Failed to associate ip address %s to network %s", ipToAssoc, network), e); } finally { if (!success && releaseOnFailure) { if (ip != null) { @@ -1861,7 +1878,7 @@ public class IpAddressManagerImpl extends ManagerBase implements 
IpAddressManage guestNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zoneId, ACLType.Account, null, null, null, null, true, null, null, null, null, null, - null, null, null, null, null); + null, null, null, null, null, null); if (guestNetwork == null) { logger.warn("Failed to create default Virtual network for the account " + accountId + "in zone " + zoneId); throw new CloudRuntimeException("Failed to create a Guest Isolated Networks with SourceNAT " diff --git a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java index a3432a8633a..d5b3cab44a6 100644 --- a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java +++ b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java @@ -557,9 +557,7 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Servi final long accountId = network.getAccountId(); final long domainId = network.getDomainId(); - if (FirewallRule.TrafficType.Egress.equals(trafficType)) { - accountManager.checkAccess(caller, null, true, network); - } + accountManager.checkAccess(caller, null, true, network); // Verify that the network guru supports the protocol specified Map caps = networkModel.getNetworkServiceCapabilities(network.getId(), Network.Service.Firewall); diff --git a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java index 16473e837a9..39546dc9061 100644 --- a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java @@ -301,7 +301,7 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { copyOfVpc = _vpcService.createVpc(vpc.getZoneId(), vpcOfferingId, vpc.getAccountId(), 
vpc.getName(), vpc.getDisplayText(), vpc.getCidr(), vpc.getNetworkDomain(), vpc.getIp4Dns1(), vpc.getIp4Dns2(), - vpc.getIp6Dns1(), vpc.getIp6Dns2(), vpc.isDisplay(), vpc.getPublicMtu()); + vpc.getIp6Dns1(), vpc.getIp6Dns2(), vpc.isDisplay(), vpc.getPublicMtu(), null, null, null); copyOfVpcId = copyOfVpc.getId(); //on resume of migration the uuid will be swapped already. So the copy will have the value of the original vpcid. diff --git a/server/src/main/java/com/cloud/network/NetworkModelImpl.java b/server/src/main/java/com/cloud/network/NetworkModelImpl.java index 1a994d5c0a5..1276ec22067 100644 --- a/server/src/main/java/com/cloud/network/NetworkModelImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkModelImpl.java @@ -18,7 +18,6 @@ package com.cloud.network; import java.math.BigInteger; -import java.security.InvalidParameterException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; @@ -146,6 +145,8 @@ import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.dao.VMInstanceDao; +import static com.cloud.network.Network.Service.SecurityGroup; + public class NetworkModelImpl extends ManagerBase implements NetworkModel, Configurable { public static final String UNABLE_TO_USE_NETWORK = "Unable to use network with id= %s, permission denied"; @Inject @@ -417,10 +418,10 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } // Since it's non-conserve mode, only one service should used for IP if (services.size() != 1) { - throw new InvalidParameterException("There are multiple services used ip " + ip.getAddress() + "."); + throw new InvalidParameterValueException("There are multiple services used ip " + ip.getAddress() + "."); } if (service != null && !((Service)services.toArray()[0] == service || service.equals(Service.Firewall))) { - throw new InvalidParameterException("The IP " + ip.getAddress() + " is already used as " + 
((Service)services.toArray()[0]).getName() + " rather than " + + throw new InvalidParameterValueException("The IP " + ip.getAddress() + " is already used as " + ((Service)services.toArray()[0]).getName() + " rather than " + service.getName()); } return true; @@ -458,7 +459,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi // Since IP already has service to bind with, the oldProvider can't be null Set newProviders = serviceToProviders.get(service); if (newProviders == null || newProviders.isEmpty()) { - throw new InvalidParameterException("There is no new provider for IP " + publicIp.getAddress() + " of service " + service.getName() + "!"); + throw new InvalidParameterValueException("There is no new provider for IP " + publicIp.getAddress() + " of service " + service.getName() + "!"); } Provider newProvider = (Provider)newProviders.toArray()[0]; Set oldProviders = serviceToProviders.get(services.toArray()[0]); @@ -471,7 +472,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi IpDeployer newIpDeployer = ((IpDeployingRequester)newElement).getIpDeployer(network); // FIXME: I ignored this check } else { - throw new InvalidParameterException("Ip cannot be applied for new provider!"); + throw new InvalidParameterValueException("Ip cannot be applied for new provider!"); } return true; } @@ -787,13 +788,19 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } @Override - public NetworkVO getNetworkWithSGWithFreeIPs(Long zoneId) { + public NetworkVO getNetworkWithSGWithFreeIPs(Account account, Long zoneId) { List networks = _networksDao.listByZoneSecurityGroup(zoneId); if (networks == null || networks.isEmpty()) { return null; } NetworkVO ret_network = null; for (NetworkVO nw : networks) { + try { + checkAccountNetworkPermissions(account, nw); + } catch (PermissionDeniedException e) { + continue; + } + List vlans = _vlanDao.listVlansByNetworkId(nw.getId()); for (VlanVO vlan 
: vlans) { if (_ipAddressDao.countFreeIpsInVlan(vlan.getId()) > 0) { @@ -1149,6 +1156,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi return _ntwkSrvcDao.canProviderSupportServiceInNetwork(networkId, service, provider); } + @Override + public boolean isAnyServiceSupportedInNetwork(long networkId, Provider provider, Service... services) { + return _ntwkSrvcDao.isAnyServiceSupportedInNetwork(networkId, provider, services); + } + @Override public List listSupportedNetworkServiceProviders(String serviceName) { Network.Service service = null; @@ -1263,7 +1275,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi physicalNetworkId = findPhysicalNetworkId(network.getDataCenterId(), null, null); } - return isServiceEnabledInNetwork(physicalNetworkId, network.getId(), Service.SecurityGroup); + return isServiceEnabledInNetwork(physicalNetworkId, network.getId(), SecurityGroup); } @Override @@ -1598,6 +1610,10 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi } NetworkVO network = _networksDao.findById(networkId); + if (network == null) { + throw new CloudRuntimeException("Could not find network associated with public IP."); + } + NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId()); if (offering.getGuestType() != GuestType.Isolated) { return true; @@ -2170,7 +2186,6 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi NetworkVO network = _networksDao.findById(networkId); Integer networkRate = getNetworkRate(network.getId(), vm.getId()); -// NetworkGuru guru = _networkGurus.get(network.getGuruName()); NicProfile profile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), networkRate, isSecurityGroupSupportedInNetwork(network), getNetworkTag( vm.getHypervisorType(), network)); @@ -2180,7 +2195,17 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi if 
(network.getTrafficType() == TrafficType.Guest && network.getPrivateMtu() != null) { profile.setMtu(network.getPrivateMtu()); } -// guru.updateNicProfile(profile, network); + + DataCenter dc = _dcDao.findById(network.getDataCenterId()); + + Pair ip4Dns = getNetworkIp4Dns(network, dc); + profile.setIPv4Dns1(ip4Dns.first()); + profile.setIPv4Dns2(ip4Dns.second()); + + Pair ip6Dns = getNetworkIp6Dns(network, dc); + profile.setIPv6Dns1(ip6Dns.first()); + profile.setIPv6Dns2(ip6Dns.second()); + return profile; } @@ -2387,7 +2412,9 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi throw new InvalidParameterValueException("endIPv6 is not in ip6cidr indicated network!"); } } + } + public void checkIp6CidrSizeEqualTo64(String ip6Cidr) { int cidrSize = NetUtils.getIp6CidrSize(ip6Cidr); // we only support cidr == 64 if (cidrSize != 64) { @@ -2741,4 +2768,39 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi throw new InvalidParameterValueException("Invalid IPv6 for IPv6 DNS2"); } } + + @Override + public boolean isSecurityGroupSupportedForZone(Long zoneId) { + List networks = getPhysicalNtwksSupportingTrafficType(zoneId, TrafficType.Guest); + for (PhysicalNetwork network : networks ) { + if (_pNSPDao.isServiceProviderEnabled(network.getId(), Provider.SecurityGroupProvider.getName(), Service.SecurityGroup.getName())) { + return true; + } + } + return false; + } + + @Override + public boolean checkSecurityGroupSupportForNetwork(Account account, DataCenter zone, + List networkIds, + List securityGroupsIds) { + if (zone.isSecurityGroupEnabled()) { + return true; + } + if (CollectionUtils.isNotEmpty(networkIds)) { + for (Long networkId : networkIds) { + Network network = _networksDao.findById(networkId); + if (network == null) { + throw new InvalidParameterValueException("Unable to find network by id " + networkId); + } + if (network.getGuestType() == Network.GuestType.Shared && 
isSecurityGroupSupportedInNetwork(network)) { + return true; + } + } + } else if (CollectionUtils.isNotEmpty(securityGroupsIds)) { + Network networkWithSecurityGroup = getNetworkWithSGWithFreeIPs(account, zone.getId()); + return networkWithSecurityGroup != null; + } + return false; + } } diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java index 0ec5d2ec0d3..70255f01b1c 100644 --- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java @@ -20,7 +20,6 @@ import java.net.Inet6Address; import java.net.InetAddress; import java.net.URI; import java.net.UnknownHostException; -import java.security.InvalidParameterException; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; @@ -37,11 +36,11 @@ import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.UUID; -import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.bgp.BGPService; import com.cloud.dc.VlanDetailsVO; import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.network.dao.NsxProviderDao; @@ -83,10 +82,12 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.network.NetworkPermissionVO; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.cloudstack.network.dao.NetworkPermissionDao; import org.apache.cloudstack.network.element.InternalLoadBalancerElementService; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.ObjectUtils; import 
org.apache.commons.lang3.StringUtils; @@ -205,6 +206,7 @@ import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; import com.cloud.server.ResourceTag; import com.cloud.server.ResourceTag.ResourceObjectType; +import com.cloud.service.ServiceOfferingVO; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; @@ -260,7 +262,6 @@ import com.cloud.vm.dao.NicSecondaryIpVO; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; import com.googlecode.ipv6.IPv6Address; -import com.cloud.service.ServiceOfferingVO; /** * NetworkServiceImpl implements NetworkService. @@ -416,6 +417,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C NsxProviderDao nsxProviderDao; @Inject private VirtualRouterProviderDao virtualRouterProviderDao; + @Inject + RoutedIpv4Manager routedIpv4Manager; + @Inject + private BGPService bgpService; + List internalLoadBalancerElementServices = new ArrayList<>(); Map internalLoadBalancerElementServiceMap = new HashMap<>(); @@ -551,10 +557,10 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } // Since it's non-conserve mode, only one service should be used for IP if (services.size() != 1) { - throw new InvalidParameterException("There are multiple services used ip " + ip.getAddress() + "."); + throw new InvalidParameterValueException("There are multiple services used ip " + ip.getAddress() + "."); } if (service != null && !((Service)services.toArray()[0] == service || service.equals(Service.Firewall))) { - throw new InvalidParameterException("The IP " + ip.getAddress() + " is already used as " + ((Service)services.toArray()[0]).getName() + " rather than " + service.getName()); + throw new InvalidParameterValueException("The IP " + ip.getAddress() + " is already used as " + ((Service)services.toArray()[0]).getName() + " rather than " + service.getName()); } return true; } @@ -600,7 +606,7 @@ public 
class NetworkServiceImpl extends ManagerBase implements NetworkService, C } // We don't support multiple providers for one service now if (!provider.equals(curProvider)) { - throw new InvalidParameterException("There would be multiple providers for IP " + ip.getAddress() + " with the new network offering!"); + throw new InvalidParameterValueException("There would be multiple providers for IP " + ip.getAddress() + " with the new network offering!"); } } } @@ -1378,6 +1384,40 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } } + void validateNetworkCidrSize(Account caller, Integer cidrSize, String cidr, NetworkOffering networkOffering, long accountId) { + if (!GuestType.Isolated.equals(networkOffering.getGuestType())) { + if (cidrSize != null) { + throw new InvalidParameterValueException("network cidr size is only applicable on Isolated networks"); + } + return; + } + if (ObjectUtils.allNotNull(cidr, cidrSize)) { + throw new InvalidParameterValueException("network cidr and cidr size are mutually exclusive"); + } + if (NetworkOffering.NetworkMode.ROUTED.equals(networkOffering.getNetworkMode()) + && routedIpv4Manager.isVirtualRouterGateway(networkOffering)) { + if (cidr != null) { + if (!networkOffering.isForVpc() && !_accountMgr.isRootAdmin(caller.getId())) { + throw new InvalidParameterValueException("Only root admin can set the gateway/netmask of Isolated networks with ROUTED mode"); + } + return; + } + if (cidrSize == null) { + throw new InvalidParameterValueException("network cidr or cidr size is required for Isolated networks with ROUTED mode"); + } + Integer maxCidrSize = routedIpv4Manager.RoutedNetworkIPv4MaxCidrSize.valueIn(accountId); + if (cidrSize > maxCidrSize) { + throw new InvalidParameterValueException("network cidr size cannot be bigger than maximum cidr size " + maxCidrSize); + } + Integer minCidrSize = routedIpv4Manager.RoutedNetworkIPv4MinCidrSize.valueIn(accountId); + if (cidrSize < minCidrSize) { + throw new 
InvalidParameterValueException("network cidr size cannot be smaller than minimum cidr size " + minCidrSize); + } + } else if (cidrSize != null) { + throw new InvalidParameterValueException("network cidr size is only applicable on Isolated networks with ROUTED mode: " + cidrSize); + } + } + void validateSharedNetworkRouterIPs(String gateway, String startIP, String endIP, String netmask, String routerIPv4, String routerIPv6, String startIPv6, String endIPv6, String ip6Cidr, NetworkOffering ntwkOff) { if (ntwkOff.getGuestType() == GuestType.Shared) { validateSharedNetworkRouterIPv4(routerIPv4, startIP, endIP, gateway, netmask); @@ -1453,6 +1493,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C boolean hideIpAddressUsage = adminCalledUs && ((CreateNetworkCmdByAdmin)cmd).getHideIpAddressUsage(); String routerIPv4 = adminCalledUs ? ((CreateNetworkCmdByAdmin)cmd).getRouterIp() : null; String routerIPv6 = adminCalledUs ? ((CreateNetworkCmdByAdmin)cmd).getRouterIpv6() : null; + Long asNumber = cmd.getAsNumber(); String name = cmd.getNetworkName(); String displayText = cmd.getDisplayText(); @@ -1477,6 +1518,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C String ip4Dns2 = cmd.getIp4Dns2(); String ip6Dns1 = cmd.getIp6Dns1(); String ip6Dns2 = cmd.getIp6Dns2(); + Integer networkCidrSize = cmd.getCidrSize(); + List bgpPeerIds = adminCalledUs ? 
((CreateNetworkCmdByAdmin)cmd).getBgpPeerIds() : null; // Validate network offering id NetworkOffering ntwkOff = getAndValidateNetworkOffering(networkOfferingId); @@ -1591,6 +1634,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C endIPv6 = startIPv6; } _networkModel.checkIp6Parameters(startIPv6, endIPv6, ip6Gateway, ip6Cidr); + if (!GuestType.Shared.equals(ntwkOff.getGuestType())) { + _networkModel.checkIp6CidrSizeEqualTo64(ip6Cidr); + } if (zone.getNetworkType() != NetworkType.Advanced || ntwkOff.getGuestType() != Network.GuestType.Shared) { throw new InvalidParameterValueException("Can only support create IPv6 network with advance shared network!"); @@ -1605,6 +1651,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } } + if (isNonVpcNetworkSupportingDynamicRouting(ntwkOff) && ntwkOff.isSpecifyAsNumber() && asNumber == null) { + throw new InvalidParameterValueException("AS number is required for the network but not passed."); + } + + validateNetworkCidrSize(caller, networkCidrSize, cidr, ntwkOff, owner.getAccountId()); + validateSharedNetworkRouterIPs(gateway, startIP, endIP, netmask, routerIPv4, routerIPv6, startIPv6, endIPv6, ip6Cidr, ntwkOff); Pair ip6GatewayCidr = null; @@ -1653,6 +1705,17 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C throw new InvalidParameterValueException("Only ROOT admin is allowed to specify vlanId or bypass vlan overlap check"); } + // Validate BGP peers + if (CollectionUtils.isNotEmpty(bgpPeerIds)) { + if (vpcId != null) { + throw new InvalidParameterValueException("The BGP peers of VPC tiers will inherit from the VPC, do not add separately."); + } + if (!routedIpv4Manager.isDynamicRoutedNetwork(ntwkOff)) { + throw new InvalidParameterValueException("The network offering does not support Dynamic routing"); + } + routedIpv4Manager.validateBgpPeers(owner, zone.getId(), bgpPeerIds); + } + if (ipv4) { // For non-root admins check 
cidr limit - if it's allowed by global config value if (!_accountMgr.isRootAdmin(caller.getId()) && cidr != null) { @@ -1727,7 +1790,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C Network network = commitNetwork(networkOfferingId, gateway, startIP, endIP, netmask, networkDomain, vlanId, bypassVlanOverlapCheck, name, displayText, caller, physicalNetworkId, zone.getId(), domainId, isDomainSpecific, subdomainAccess, vpcId, startIPv6, endIPv6, ip6Gateway, ip6Cidr, displayNetwork, aclId, secondaryVlanId, privateVlanType, ntwkOff, pNtwk, aclType, owner, cidr, createVlan, - externalId, routerIPv4, routerIPv6, associatedNetwork, ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2, interfaceMTUs); + externalId, routerIPv4, routerIPv6, associatedNetwork, ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2, interfaceMTUs, networkCidrSize); // retrieve, acquire and associate the correct IP addresses checkAndSetRouterSourceNatIp(owner, cmd, network); @@ -1740,6 +1803,22 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C ipv6Service.assignIpv6SubnetToNetwork(ip6Cidr, network.getId()); } + // assign to network + if (NetworkOffering.NetworkMode.ROUTED.equals(ntwkOff.getNetworkMode())) { + routedIpv4Manager.assignIpv4SubnetToNetwork(network); + } + if (isNonVpcNetworkSupportingDynamicRouting(ntwkOff)) { + try { + bgpService.allocateASNumber(zone.getId(), asNumber, network.getId(), null); + } catch (CloudRuntimeException ex) { + deleteNetwork(network.getId(), true); + throw ex; + } + } + if (CollectionUtils.isNotEmpty(bgpPeerIds)) { + routedIpv4Manager.persistBgpPeersForGuestNetwork(network.getId(), bgpPeerIds); + } + // if the network offering has persistent set to true, implement the network if (ntwkOff.isPersistent()) { return implementedNetworkInCreation(caller, zone, network); @@ -1747,15 +1826,31 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C return network; } + private boolean 
isNonVpcNetworkSupportingDynamicRouting(NetworkOffering networkOffering) { + return !networkOffering.isForVpc() && NetworkOffering.RoutingMode.Dynamic == networkOffering.getRoutingMode(); + } + private void validateNetworkCreationSupported(long zoneId, String zoneName, GuestType guestType) { NsxProviderVO nsxProviderVO = nsxProviderDao.findByZoneId(zoneId); - if (Objects.nonNull(nsxProviderVO) && List.of(GuestType.L2, GuestType.Shared).contains(guestType)) { + if (Objects.nonNull(nsxProviderVO) && GuestType.L2.equals(guestType)) { throw new InvalidParameterValueException( String.format("Creation of %s networks is not supported in NSX enabled zone %s", guestType.name(), zoneName) ); } } + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_NETWORK_CREATE, eventDescription = "creating network") + public Network createGuestNetwork(long networkOfferingId, String name, String displayText, Account owner, + PhysicalNetwork physicalNetwork, long zoneId, ACLType aclType) throws + InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException { + return _networkMgr.createGuestNetwork(networkOfferingId, name, displayText, + null, null, null, false, null, owner, null, physicalNetwork, zoneId, + aclType, null, null, null, null, true, null, + null, null, null, null, null, null, null, null, null, null); + } + void checkAndSetRouterSourceNatIp(Account owner, CreateNetworkCmd cmd, Network network) throws InsufficientAddressCapacityException, ResourceAllocationException { String sourceNatIp = cmd.getSourceNatIP(); if (sourceNatIp == null) { @@ -2134,12 +2229,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } } - private Network commitNetwork(final Long networkOfferingId, final String gateway, final String startIP, final String endIP, final String netmask, final String networkDomain, final String vlanIdFinal, + protected Network commitNetwork(final Long networkOfferingId, final String gateway, final String 
startIP, final String endIP, final String netmask, final String networkDomain, final String vlanIdFinal, final Boolean bypassVlanOverlapCheck, final String name, final String displayText, final Account caller, final Long physicalNetworkId, final Long zoneId, final Long domainId, final boolean isDomainSpecific, final Boolean subdomainAccessFinal, final Long vpcId, final String startIPv6, final String endIPv6, final String ip6Gateway, final String ip6Cidr, final Boolean displayNetwork, final Long aclId, final String isolatedPvlan, final PVlanType isolatedPvlanType, final NetworkOffering ntwkOff, final PhysicalNetwork pNtwk, final ACLType aclType, final Account ownerFinal, final String cidr, final boolean createVlan, final String externalId, String routerIp, String routerIpv6, - final Network associatedNetwork, final String ip4Dns1, final String ip4Dns2, final String ip6Dns1, final String ip6Dns2, Pair vrIfaceMTUs) throws InsufficientCapacityException, ResourceAllocationException { + final Network associatedNetwork, final String ip4Dns1, final String ip4Dns2, final String ip6Dns1, final String ip6Dns2, Pair vrIfaceMTUs, + final Integer networkCidrSize) throws InsufficientCapacityException, ResourceAllocationException { try { Network network = Transaction.execute(new TransactionCallbackWithException() { @Override @@ -2195,7 +2291,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } } network = _vpcMgr.createVpcGuestNetwork(networkOfferingId, name, displayText, gateway, cidr, vlanId, networkDomain, owner, sharedDomainId, pNtwk, zoneId, aclType, - subdomainAccess, vpcId, aclId, caller, displayNetwork, externalId, ip6Gateway, ip6Cidr, ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2, vrIfaceMTUs); + subdomainAccess, vpcId, aclId, caller, displayNetwork, externalId, ip6Gateway, ip6Cidr, ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2, vrIfaceMTUs, networkCidrSize); } else { if (_configMgr.isOfferingForVpc(ntwkOff)) { throw new 
InvalidParameterValueException("Network offering can be used for VPC networks only"); @@ -2204,7 +2300,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C throw new InvalidParameterValueException("Internal Lb can be enabled on vpc networks only"); } network = _networkMgr.createGuestNetwork(networkOfferingId, name, displayText, gateway, cidr, vlanId, bypassVlanOverlapCheck, networkDomain, owner, sharedDomainId, pNtwk, - zoneId, aclType, subdomainAccess, vpcId, ip6Gateway, ip6Cidr, displayNetwork, isolatedPvlan, isolatedPvlanType, externalId, routerIp, routerIpv6, ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2, vrIfaceMTUs); + zoneId, aclType, subdomainAccess, vpcId, ip6Gateway, ip6Cidr, displayNetwork, isolatedPvlan, isolatedPvlanType, externalId, routerIp, routerIpv6, ip4Dns1, ip4Dns2, + ip6Dns1, ip6Dns2, vrIfaceMTUs, networkCidrSize); } if (createVlan && network != null) { @@ -2266,6 +2363,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C Long associatedNetworkId = cmd.getAssociatedNetworkId(); String networkFilterStr = cmd.getNetworkFilter(); + boolean applyManualPagination = CollectionUtils.isNotEmpty(supportedServicesStr) || + Boolean.TRUE.equals(canUseForDeploy); + String vlanId = null; if (cmd instanceof ListNetworksCmdByAdmin) { vlanId = ((ListNetworksCmdByAdmin)cmd).getVlan(); @@ -2351,7 +2451,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C isRecursive = true; } - Filter searchFilter = new Filter(NetworkVO.class, "id", false, null, null); + Long offset = cmd.getStartIndex(); + Long limit = cmd.getPageSizeVal(); + if (applyManualPagination) { + offset = null; + limit = null; + } + Filter searchFilter = new Filter(NetworkVO.class, "id", false, offset, limit); SearchBuilder sb = _networksDao.createSearchBuilder(); if (forVpc != null) { @@ -2390,14 +2496,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C if 
(permittedAccounts.isEmpty()) { SearchBuilder domainSearch = _domainDao.createSearchBuilder(); domainSearch.and("path", domainSearch.entity().getPath(), SearchCriteria.Op.LIKE); - sb.join("domainSearch", domainSearch, sb.entity().getDomainId(), domainSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.join("domain", domainSearch, sb.entity().getDomainId(), domainSearch.entity().getId(), JoinBuilder.JoinType.INNER); } SearchBuilder accountSearch = _accountDao.createSearchBuilder(); accountSearch.and("typeNEQ", accountSearch.entity().getType(), SearchCriteria.Op.NEQ); accountSearch.and("typeEQ", accountSearch.entity().getType(), SearchCriteria.Op.EQ); - sb.join("accountSearch", accountSearch, sb.entity().getAccountId(), accountSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.join("account", accountSearch, sb.entity().getAccountId(), accountSearch.entity().getId(), JoinBuilder.JoinType.INNER); if (associatedNetworkId != null) { SearchBuilder associatedNetworkSearch = _networkDetailsDao.createSearchBuilder(); @@ -2406,113 +2512,138 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C sb.join("associatedNetworkSearch", associatedNetworkSearch, sb.entity().getId(), associatedNetworkSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER); } - List networksToReturn = new ArrayList(); - - if (isSystem == null || !isSystem) { - if (!permittedAccounts.isEmpty()) { - if (Arrays.asList(Network.NetworkFilter.Account, Network.NetworkFilter.AccountDomain, Network.NetworkFilter.All).contains(networkFilter)) { - //get account level networks - networksToReturn.addAll(listAccountSpecificNetworks(buildNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, physicalNetworkId, networkOfferingId, - aclType, skipProjectNetworks, restartRequired, specifyIpRanges, vpcId, tags, display, vlanId, associatedNetworkId), searchFilter, permittedAccounts)); - } - if (domainId != null && 
Arrays.asList(Network.NetworkFilter.Domain, Network.NetworkFilter.AccountDomain, Network.NetworkFilter.All).contains(networkFilter)) { - //get domain level networks - networksToReturn.addAll(listDomainLevelNetworks(buildNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, physicalNetworkId, networkOfferingId, - aclType, true, restartRequired, specifyIpRanges, vpcId, tags, display, vlanId, associatedNetworkId), searchFilter, domainId, false)); - } - if (Arrays.asList(Network.NetworkFilter.Shared, Network.NetworkFilter.All).contains(networkFilter)) { - // get shared networks - List sharedNetworks = listSharedNetworks(buildNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, physicalNetworkId, networkOfferingId, - aclType, true, restartRequired, specifyIpRanges, vpcId, tags, display, vlanId, associatedNetworkId), searchFilter, permittedAccounts); - addNetworksToReturnIfNotExist(networksToReturn, sharedNetworks); - - } - } else { - if (Arrays.asList(Network.NetworkFilter.Account, Network.NetworkFilter.AccountDomain, Network.NetworkFilter.All).contains(networkFilter)) { - //add account specific networks - networksToReturn.addAll(listAccountSpecificNetworksByDomainPath(buildNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, physicalNetworkId, networkOfferingId, - aclType, skipProjectNetworks, restartRequired, specifyIpRanges, vpcId, tags, display, vlanId, associatedNetworkId), searchFilter, path, isRecursive)); - } - if (Arrays.asList(Network.NetworkFilter.Domain, Network.NetworkFilter.AccountDomain, Network.NetworkFilter.All).contains(networkFilter)) { - //add domain specific networks of domain + parent domains - networksToReturn.addAll(listDomainSpecificNetworksByDomainPath(buildNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, physicalNetworkId, networkOfferingId, - aclType, true, restartRequired, specifyIpRanges, vpcId, tags, display, vlanId, 
associatedNetworkId), searchFilter, path, isRecursive)); - //add networks of subdomains - if (domainId == null) { - networksToReturn.addAll(listDomainLevelNetworks(buildNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, physicalNetworkId, networkOfferingId, - aclType, true, restartRequired, specifyIpRanges, vpcId, tags, display, vlanId, associatedNetworkId), searchFilter, caller.getDomainId(), true)); - } - } - if (Arrays.asList(Network.NetworkFilter.Shared, Network.NetworkFilter.All).contains(networkFilter)) { - // get shared networks - List sharedNetworks = listSharedNetworksByDomainPath(buildNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, physicalNetworkId, networkOfferingId, - aclType, true, restartRequired, specifyIpRanges, vpcId, tags, display, vlanId, associatedNetworkId), searchFilter, path, isRecursive); - addNetworksToReturnIfNotExist(networksToReturn, sharedNetworks); - } - } + Pair, Integer> result = new Pair<>(new ArrayList<>(), 0); + if (BooleanUtils.isTrue(isSystem)) { + SearchCriteria sc = createNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, + physicalNetworkId, networkOfferingId, null, restartRequired, specifyIpRanges, + vpcId, tags, display, vlanId, associatedNetworkId); + addProjectNetworksConditionToSearch(sc, true); + result = _networksDao.searchAndCount(sc, searchFilter); } else { - networksToReturn = _networksDao.search(buildNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, trafficType, physicalNetworkId, networkOfferingId, - null, true, restartRequired, specifyIpRanges, vpcId, tags, display, vlanId, associatedNetworkId), searchFilter); + SearchCriteria additionalSC = _networksDao.createSearchCriteria(); + + addAccountSpecificNetworksToSearch(additionalSC, sb, networkFilter, skipProjectNetworks, permittedAccounts, path, isRecursive); + addDomainSpecificNetworksToSearch(additionalSC, sb, networkFilter, permittedAccounts, 
domainId, path, isRecursive); + addSharedNetworksToSearch(additionalSC, sb, networkFilter, permittedAccounts, path, isRecursive); + + if (CollectionUtils.isNotEmpty(additionalSC.getValues())) { + SearchCriteria sc = createNetworkSearchCriteria(sb, keyword, id, isSystem, zoneId, guestIpType, + trafficType, physicalNetworkId, networkOfferingId, aclType, restartRequired, specifyIpRanges, vpcId, + tags, display, vlanId, associatedNetworkId); + sc.addAnd("id", SearchCriteria.Op.SC, additionalSC); + result = _networksDao.searchAndCount(sc, searchFilter); + } } + List networksToReturn = result.first(); if (supportedServicesStr != null && !supportedServicesStr.isEmpty() && !networksToReturn.isEmpty()) { - List supportedNetworks = new ArrayList(); - Service[] suppportedServices = new Service[supportedServicesStr.size()]; + List supportedNetworks = new ArrayList<>(); + Service[] supportedServices = new Service[supportedServicesStr.size()]; int i = 0; for (String supportedServiceStr : supportedServicesStr) { Service service = Service.getService(supportedServiceStr); if (service == null) { throw new InvalidParameterValueException("Invalid service specified " + supportedServiceStr); } else { - suppportedServices[i] = service; + supportedServices[i] = service; } i++; } - for (NetworkVO network : networksToReturn) { - if (areServicesSupportedInNetwork(network.getId(), suppportedServices)) { + if (areServicesSupportedInNetwork(network.getId(), supportedServices)) { supportedNetworks.add(network); } } - networksToReturn = supportedNetworks; } if (canUseForDeploy != null) { - List networksForDeploy = new ArrayList(); + List networksForDeploy = new ArrayList<>(); for (NetworkVO network : networksToReturn) { if (_networkModel.canUseForDeploy(network) == canUseForDeploy) { networksForDeploy.add(network); } } - networksToReturn = networksForDeploy; } - //Now apply pagination - List wPagination = com.cloud.utils.StringUtils.applyPagination(networksToReturn, cmd.getStartIndex(), 
cmd.getPageSizeVal()); - if (wPagination != null) { - Pair, Integer> listWPagination = new Pair, Integer>(wPagination, networksToReturn.size()); - return listWPagination; + if (applyManualPagination) { + //Now apply pagination + List wPagination = com.cloud.utils.StringUtils.applyPagination(networksToReturn, cmd.getStartIndex(), cmd.getPageSizeVal()); + if (wPagination != null) { + Pair, Integer> listWPagination = new Pair<>(wPagination, networksToReturn.size()); + return listWPagination; + } + return new Pair<>(networksToReturn, networksToReturn.size()); } - return new Pair, Integer>(networksToReturn, networksToReturn.size()); + return new Pair<>(result.first(), result.second()); } - private void addNetworksToReturnIfNotExist(final List networksToReturn, final List sharedNetworks) { - Set networkIds = networksToReturn.stream() - .map(NetworkVO::getId) - .collect(Collectors.toSet()); - List sharedNetworksToReturn = sharedNetworks.stream() - .filter(network -> ! networkIds.contains(network.getId())) - .collect(Collectors.toList()); - networksToReturn.addAll(sharedNetworksToReturn); + private void addAccountSpecificNetworksToSearch(SearchCriteria additionalSC, SearchBuilder sb, + Network.NetworkFilter networkFilter, boolean skipProjectNetworks, + List permittedAccounts, String path, boolean isRecursive) { + if (!Arrays.asList(Network.NetworkFilter.Account, Network.NetworkFilter.AccountDomain, Network.NetworkFilter.All).contains(networkFilter)) { + return; + } + + SearchCriteria accountSC = sb.create(); + accountSC.addAnd("aclType", SearchCriteria.Op.EQ, ACLType.Account.toString()); + if (permittedAccounts.isEmpty()) { + if (path != null) { + // accountSC's WHERE clause gets OR-included in additionalSC's WHERE clause, which then gets AND-included + // in the main search criteria's WHERE clause. If we added the path filter as join parameters, it would not + // be present in the main search criteria. 
This is a way to add a condition that would normally go to accountSC's + // join parameters to its WHERE clause. + accountSC.getJoin("domain").addAnd("path", SearchCriteria.Op.LIKE, isRecursive ? path + "%" : path); + accountSC.addAnd("id", SearchCriteria.Op.SC, accountSC.getJoin("domain")); + } + } else { + accountSC.addAnd("accountId", SearchCriteria.Op.IN, permittedAccounts.toArray()); + } + addProjectNetworksConditionToSearch(accountSC, skipProjectNetworks); + additionalSC.addOr("id", SearchCriteria.Op.SC, accountSC); } - private SearchCriteria buildNetworkSearchCriteria(SearchBuilder sb, String keyword, Long id, - Boolean isSystem, Long zoneId, String guestIpType, String trafficType, Long physicalNetworkId, - Long networkOfferingId, String aclType, boolean skipProjectNetworks, Boolean restartRequired, - Boolean specifyIpRanges, Long vpcId, Map tags, Boolean display, String vlanId, Long associatedNetworkId) { + private void addDomainSpecificNetworksToSearch(SearchCriteria additionalSC, SearchBuilder sb, Network.NetworkFilter networkFilter, + List permittedAccounts, Long domainId, String path, boolean isRecursive) { + if (!Arrays.asList(Network.NetworkFilter.Domain, Network.NetworkFilter.AccountDomain, Network.NetworkFilter.All).contains(networkFilter)) { + return; + } + + if (permittedAccounts.isEmpty()) { + // Add domain specific networks of domain + parent domains + addDomainNetworksByDomainPathToSearch(additionalSC, sb, path, isRecursive); + if (domainId == null) { + // Add networks of subdomains + Account caller = CallContext.current().getCallingAccount(); + addDomainLevelNetworksToSearch(additionalSC, sb, caller.getDomainId(), true); + } + } else { + if (domainId != null) { + // Add domain level networks + addDomainLevelNetworksToSearch(additionalSC, sb, domainId, false); + } + } + } + + private void addSharedNetworksToSearch(SearchCriteria additionalSC, SearchBuilder sb, Network.NetworkFilter networkFilter, + List permittedAccounts, String path, boolean 
isRecursive) { + if (!Arrays.asList(Network.NetworkFilter.Shared, Network.NetworkFilter.All).contains(networkFilter)) { + return; + } + + if (permittedAccounts.isEmpty()) { + addSharedNetworksByDomainPathToSearch(additionalSC, sb, path, isRecursive); + } else { + addSharedNetworksByAccountsToSearch(additionalSC, sb, permittedAccounts); + } + } + + private SearchCriteria createNetworkSearchCriteria(SearchBuilder sb, String keyword, Long id, + Boolean isSystem, Long zoneId, String guestIpType, String trafficType, Long physicalNetworkId, + Long networkOfferingId, String aclType, Boolean restartRequired, + Boolean specifyIpRanges, Long vpcId, Map tags, Boolean display, String vlanId, Long associatedNetworkId) { SearchCriteria sc = sb.create(); @@ -2554,12 +2685,6 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C sc.addAnd("physicalNetworkId", SearchCriteria.Op.EQ, physicalNetworkId); } - if (skipProjectNetworks) { - sc.setJoinParameters("accountSearch", "typeNEQ", Account.Type.PROJECT); - } else { - sc.setJoinParameters("accountSearch", "typeEQ", Account.Type.PROJECT); - } - if (restartRequired != null) { sc.addAnd("restartRequired", SearchCriteria.Op.EQ, restartRequired); } @@ -2600,8 +2725,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C return sc; } - private List listDomainLevelNetworks(SearchCriteria sc, Filter searchFilter, long domainId, boolean parentDomainsOnly) { - List networkIds = new ArrayList(); + private void addDomainLevelNetworksToSearch(SearchCriteria additionalSC, SearchBuilder sb, + long domainId, boolean parentDomainsOnly) { + List networkIds = new ArrayList<>(); Set allowedDomains = _domainMgr.getDomainParentIds(domainId); List maps = _networkDomainDao.listDomainNetworkMapByDomain(allowedDomains.toArray()); @@ -2616,48 +2742,17 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } if (!networkIds.isEmpty()) { - SearchCriteria domainSC = 
_networksDao.createSearchCriteria(); + SearchCriteria domainSC = sb.create(); domainSC.addAnd("id", SearchCriteria.Op.IN, networkIds.toArray()); domainSC.addAnd("aclType", SearchCriteria.Op.EQ, ACLType.Domain.toString()); - - sc.addAnd("id", SearchCriteria.Op.SC, domainSC); - return _networksDao.search(sc, searchFilter); - } else { - return new ArrayList(); + addProjectNetworksConditionToSearch(domainSC, true); + additionalSC.addOr("id", SearchCriteria.Op.SC, domainSC); } } - private List listAccountSpecificNetworks(SearchCriteria sc, Filter searchFilter, List permittedAccounts) { - SearchCriteria accountSC = _networksDao.createSearchCriteria(); - if (!permittedAccounts.isEmpty()) { - accountSC.addAnd("accountId", SearchCriteria.Op.IN, permittedAccounts.toArray()); - } - - accountSC.addAnd("aclType", SearchCriteria.Op.EQ, ACLType.Account.toString()); - - sc.addAnd("id", SearchCriteria.Op.SC, accountSC); - return _networksDao.search(sc, searchFilter); - } - - private List listAccountSpecificNetworksByDomainPath(SearchCriteria sc, Filter searchFilter, String path, boolean isRecursive) { - SearchCriteria accountSC = _networksDao.createSearchCriteria(); - accountSC.addAnd("aclType", SearchCriteria.Op.EQ, ACLType.Account.toString()); - - if (path != null) { - if (isRecursive) { - sc.setJoinParameters("domainSearch", "path", path + "%"); - } else { - sc.setJoinParameters("domainSearch", "path", path); - } - } - - sc.addAnd("id", SearchCriteria.Op.SC, accountSC); - return _networksDao.search(sc, searchFilter); - } - - private List listDomainSpecificNetworksByDomainPath(SearchCriteria sc, Filter searchFilter, String path, boolean isRecursive) { - - Set allowedDomains = new HashSet(); + private void addDomainNetworksByDomainPathToSearch(SearchCriteria additionalSC, SearchBuilder sb, + String path, boolean isRecursive) { + Set allowedDomains = new HashSet<>(); if (path != null) { if (isRecursive) { allowedDomains = _domainMgr.getDomainChildrenIds(path); @@ -2667,39 +2762,40 
@@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } } - List networkIds = new ArrayList(); + List networkIds = new ArrayList<>(); List maps = _networkDomainDao.listDomainNetworkMapByDomain(allowedDomains.toArray()); - for (NetworkDomainVO map : maps) { networkIds.add(map.getNetworkId()); } if (!networkIds.isEmpty()) { - SearchCriteria domainSC = _networksDao.createSearchCriteria(); + SearchCriteria domainSC = sb.create(); domainSC.addAnd("id", SearchCriteria.Op.IN, networkIds.toArray()); domainSC.addAnd("aclType", SearchCriteria.Op.EQ, ACLType.Domain.toString()); - - sc.addAnd("id", SearchCriteria.Op.SC, domainSC); - return _networksDao.search(sc, searchFilter); - } else { - return new ArrayList(); + addProjectNetworksConditionToSearch(domainSC, true); + additionalSC.addOr("id", SearchCriteria.Op.SC, domainSC); } } - private List listSharedNetworks(SearchCriteria sc, Filter searchFilter, List permittedAccounts) { + private void addProjectNetworksConditionToSearch(SearchCriteria sc, boolean skipProjectNetworks) { + sc.getJoin("account").addAnd("type", skipProjectNetworks ? 
Op.NEQ : Op.EQ, Account.Type.PROJECT); + sc.addAnd("id", Op.SC, sc.getJoin("account")); + } + + private void addSharedNetworksByAccountsToSearch(SearchCriteria additionalSC, SearchBuilder sb, + List permittedAccounts) { List sharedNetworkIds = _networkPermissionDao.listPermittedNetworkIdsByAccounts(permittedAccounts); if (!sharedNetworkIds.isEmpty()) { - SearchCriteria ssc = _networksDao.createSearchCriteria(); + SearchCriteria ssc = sb.create(); ssc.addAnd("id", SearchCriteria.Op.IN, sharedNetworkIds.toArray()); - sc.addAnd("id", SearchCriteria.Op.SC, ssc); - return _networksDao.search(sc, searchFilter); + addProjectNetworksConditionToSearch(ssc, true); + additionalSC.addOr("id", SearchCriteria.Op.SC, ssc); } - return new ArrayList(); } - private List listSharedNetworksByDomainPath(SearchCriteria sc, Filter searchFilter, String path, boolean isRecursive) { - Set allowedDomains = new HashSet(); + private void addSharedNetworksByDomainPathToSearch(SearchCriteria additionalSC, SearchBuilder sb, String path, boolean isRecursive) { + Set allowedDomains = new HashSet<>(); if (path != null) { if (isRecursive) { allowedDomains = _domainMgr.getDomainChildrenIds(path); @@ -2708,7 +2804,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C allowedDomains.add(domain.getId()); } } - List allowedDomainsList = new ArrayList(allowedDomains); + List allowedDomainsList = new ArrayList<>(allowedDomains); if (!allowedDomainsList.isEmpty()) { GenericSearchBuilder accountIdSearch = _accountDao.createSearchBuilder(Long.class); @@ -2721,13 +2817,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C List sharedNetworkIds = _networkPermissionDao.listPermittedNetworkIdsByAccounts(allowedAccountsList); if (!sharedNetworkIds.isEmpty()) { - SearchCriteria ssc = _networksDao.createSearchCriteria(); + SearchCriteria ssc = sb.create(); ssc.addAnd("id", SearchCriteria.Op.IN, sharedNetworkIds.toArray()); - sc.addAnd("id", 
SearchCriteria.Op.SC, ssc); - return _networksDao.search(sc, searchFilter); + addProjectNetworksConditionToSearch(ssc, true); + additionalSC.addOr("id", SearchCriteria.Op.SC, ssc); } } - return new ArrayList(); } @Override @@ -2795,7 +2890,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C * TODO Restarting a SDN based network requires updating the nics and the configuration * in the controller. This requires a non-trivial rewrite of the restart procedure. */ - throw new InvalidParameterException("Unable to restart a running SDN network."); + throw new InvalidParameterValueException("Unable to restart a running SDN network."); } Account callerAccount = _accountMgr.getActiveAccountById(user.getAccountId()); @@ -3050,8 +3145,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } // network offering and domain suffix can be updated for Isolated networks only in 3.0 - if ((networkOfferingId != null || domainSuffix != null) && network.getGuestType() != GuestType.Isolated) { - throw new InvalidParameterValueException("NetworkOffering and domain suffix upgrade can be performed for Isolated networks only"); + if (networkOfferingId != null && network.getGuestType() != GuestType.Isolated) { + throw new InvalidParameterValueException("NetworkOffering update can be performed for Isolated networks only."); + } + // network offering and domain suffix can be updated for Isolated networks only in 3.0 + if (domainSuffix != null && ! 
Arrays.asList(GuestType.Isolated, GuestType.Shared).contains(network.getGuestType())) { + throw new InvalidParameterValueException("Domain suffix update can only be performed for Isolated and shared networks."); } boolean networkOfferingChanged = false; @@ -4007,6 +4106,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C return false; } + // network mode should be the same + NetworkOffering.NetworkMode oldNetworkMode = oldNetworkOffering.getNetworkMode() == null ? NetworkOffering.NetworkMode.NATTED: oldNetworkOffering.getNetworkMode(); + NetworkOffering.NetworkMode newNetworkMode = newNetworkOffering.getNetworkMode() == null ? NetworkOffering.NetworkMode.NATTED: newNetworkOffering.getNetworkMode(); + if (!oldNetworkMode.equals(newNetworkMode)) { + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for network mode, can't upgrade"); + return false; + } + return canMoveToPhysicalNetwork(network, oldNetworkOfferingId, newNetworkOfferingId); } @@ -4040,11 +4147,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } } if (tags != null && tags.size() > 1) { - throw new InvalidParameterException("Only one tag can be specified for a physical network at this time"); + throw new InvalidParameterValueException("Only one tag can be specified for a physical network at this time"); } if (isolationMethods != null && isolationMethods.size() > 1) { - throw new InvalidParameterException("Only one isolationMethod can be specified for a physical network at this time"); + throw new InvalidParameterValueException("Only one isolationMethod can be specified for a physical network at this time"); } if (vnetRange != null && zoneType == NetworkType.Basic) { @@ -4113,7 +4220,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C addDefaultInternalLbProviderToPhysicalNetwork(pNetwork.getId()); //Add tungsten network service 
provider - addDefaultTungstenProviderToPhysicalNetwork(pNetwork.getId()); + try { + addDefaultTungstenProviderToPhysicalNetwork(pNetwork.getId()); + } catch (Exception ex) { + logger.warn("Failed to add Tungsten provider to physical network due to:" + ex.getMessage()); + } // Add the config drive provider addConfigDriveToPhysicalNetwork(pNetwork.getId()); @@ -4183,7 +4294,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C } if (tags != null && tags.size() > 1) { - throw new InvalidParameterException("Unable to support more than one tag on network yet"); + throw new InvalidParameterValueException("Unable to support more than one tag on network yet"); } // If tags are null, then check if there are any other networks with null tags diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index 5881619791e..5e7a4a0c4ef 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -19,6 +19,7 @@ package com.cloud.network.as; import java.security.SecureRandom; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -37,6 +38,7 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; +import com.cloud.network.NetworkModel; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.affinity.AffinityGroupVO; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; @@ -70,6 +72,7 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.userdata.UserDataManager; import 
org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.RandomStringUtils; @@ -250,8 +253,12 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage @Inject NetworkOrchestrationService networkMgr; @Inject + NetworkModel networkModel; + @Inject private UserVmManager userVmMgr; @Inject + private UserDataManager userDataMgr; + @Inject private UserVmDao userVmDao; @Inject private HostDao hostDao; @@ -571,7 +578,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage userDataDetails = cmd.getUserDataDetails().toString(); } userData = userVmMgr.finalizeUserData(userData, userDataId, template); - userData = userVmMgr.validateUserData(userData, cmd.getHttpMethod()); + userData = userDataMgr.validateUserData(userData, cmd.getHttpMethod()); if (userData != null) { profileVO.setUserData(userData); } @@ -650,7 +657,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage } VirtualMachineTemplate template = entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, templateId); userData = userVmMgr.finalizeUserData(userData, userDataId, template); - userData = userVmMgr.validateUserData(userData, cmd.getHttpMethod()); + userData = userDataMgr.validateUserData(userData, cmd.getHttpMethod()); vmProfile.setUserDataId(userDataId); vmProfile.setUserData(userData); vmProfile.setUserDataDetails(userDataDetails); @@ -1805,7 +1812,8 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage null, null, true, null, affinityGroupIdList, customParameters, null, null, null, null, true, overrideDiskOfferingId); } else { - if (zone.isSecurityGroupEnabled()) { + if (networkModel.checkSecurityGroupSupportForNetwork(owner, zone, networkIds, + Collections.emptyList())) { vm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, networkIds, null, owner, vmHostName,vmHostName, 
diskOfferingId, dataDiskSize, null, hypervisorType, HTTPMethod.GET, userData, userDataId, userDataDetails, sshKeyPairs, diff --git a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java index a9fa3e95275..3449f1f5d00 100644 --- a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java +++ b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.network.element; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -24,6 +25,7 @@ import java.util.Set; import javax.inject.Inject; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; @@ -90,7 +92,8 @@ import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; -public class ConfigDriveNetworkElement extends AdapterBase implements NetworkElement, UserDataServiceProvider, +public class ConfigDriveNetworkElement extends AdapterBase implements NetworkElement, + UserDataServiceProvider, DhcpServiceProvider, DnsServiceProvider, StateListener, NetworkMigrationResponder { private static final Map> capabilities = setCapabilities(); @@ -110,6 +113,8 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle @Inject NetworkModel _networkModel; @Inject + NetworkOrchestrationService _networkOrchestrationService; + @Inject GuestOSCategoryDao _guestOSCategoryDao; @Inject GuestOSDao _guestOSDao; @@ -197,6 +202,8 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle private static Map> setCapabilities() { Map> capabilities = new 
HashMap<>(); capabilities.put(Service.UserData, null); + capabilities.put(Service.Dhcp, new HashMap<>()); + capabilities.put(Service.Dns, new HashMap<>()); return capabilities; } @@ -224,8 +231,7 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle public boolean addPasswordAndUserdata(Network network, NicProfile nic, VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { return (canHandle(network.getTrafficType()) - && configureConfigDriveData(profile, nic, dest)) - && createConfigDriveIso(profile, dest, null); + && configureConfigDriveData(profile, nic, dest)); } @Override @@ -342,10 +348,13 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle configureConfigDriveData(vm, nic, dest); // Create the config drive on dest host cache - createConfigDriveIsoOnHostCache(vm, dest.getHost().getId()); + createConfigDriveIsoOnHostCache(nic, vm, dest.getHost().getId()); } else { vm.setConfigDriveLocation(getConfigDriveLocation(vm.getId())); - addPasswordAndUserdata(network, nic, vm, dest, context); + boolean result = addPasswordAndUserdata(network, nic, vm, dest, context); + if (result) { + createConfigDriveIso(nic, vm, dest, null); + } } } catch (InsufficientCapacityException | ResourceUnavailableException e) { logger.error("Failed to add config disk drive due to: ", e); @@ -398,7 +407,7 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle vm.getUuid(), nic.getMacAddress(), userVm.getDetail("SSH.PublicKey"), (String) vm.getParameter(VirtualMachineProfile.Param.VmPassword), isWindows, VirtualMachineManager.getHypervisorHostname(dest.getHost() != null ? 
dest.getHost().getName() : "")); vm.setVmData(vmData); vm.setConfigDriveLabel(VirtualMachineManager.VmConfigDriveLabel.value()); - createConfigDriveIso(vm, dest, diskToUse); + createConfigDriveIso(nic, vm, dest, diskToUse); } } } @@ -528,7 +537,7 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle return false; } - private boolean createConfigDriveIsoOnHostCache(VirtualMachineProfile profile, Long hostId) throws ResourceUnavailableException { + private boolean createConfigDriveIsoOnHostCache(NicProfile nic, VirtualMachineProfile profile, Long hostId) throws ResourceUnavailableException { if (hostId == null) { throw new ResourceUnavailableException("Config drive iso creation failed, dest host not available", ConfigDriveNetworkElement.class, 0L); @@ -540,7 +549,9 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle final String isoFileName = ConfigDrive.configIsoFileName(profile.getInstanceName()); final String isoPath = ConfigDrive.createConfigDrivePath(profile.getInstanceName()); - final String isoData = ConfigDriveBuilder.buildConfigDrive(profile.getVmData(), isoFileName, profile.getConfigDriveLabel(), customUserdataParamMap); + List nicProfiles = _networkOrchestrationService.getNicProfiles(nic.getVirtualMachineId(), profile.getHypervisorType()); + final Map> supportedServices = getSupportedServicesByElementForNetwork(nicProfiles); + final String isoData = ConfigDriveBuilder.buildConfigDrive(nicProfiles, profile.getVmData(), isoFileName, profile.getConfigDriveLabel(), customUserdataParamMap, supportedServices); final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, null, false, true, true); final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); @@ -590,7 +601,27 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle return true; } - private boolean 
createConfigDriveIso(VirtualMachineProfile profile, DeployDestination dest, DiskTO disk) throws ResourceUnavailableException { + private Map> getSupportedServicesByElementForNetwork(List nics) { + + Map> supportedServices = new HashMap<>(); + for (NicProfile nic: nics) { + ArrayList serviceList = new ArrayList<>(); + if (_networkModel.isProviderSupportServiceInNetwork(nic.getNetworkId(), Service.Dns, getProvider())) { + serviceList.add(Service.Dns); + } + if (_networkModel.isProviderSupportServiceInNetwork(nic.getNetworkId(), Service.UserData, getProvider())) { + serviceList.add(Service.UserData); + } + if (_networkModel.isProviderSupportServiceInNetwork(nic.getNetworkId(), Service.Dhcp, getProvider())) { + serviceList.add(Service.Dhcp); + } + supportedServices.put(nic.getId(), serviceList); + } + + return supportedServices; + } + + public boolean createConfigDriveIso(NicProfile nic, VirtualMachineProfile profile, DeployDestination dest, DiskTO disk) throws ResourceUnavailableException { DataStore dataStore = getDatastoreForConfigDriveIso(disk, profile, dest); final Long agentId = findAgentId(profile, dest, dataStore); @@ -605,7 +636,10 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle final String isoFileName = ConfigDrive.configIsoFileName(profile.getInstanceName()); final String isoPath = ConfigDrive.createConfigDrivePath(profile.getInstanceName()); - final String isoData = ConfigDriveBuilder.buildConfigDrive(profile.getVmData(), isoFileName, profile.getConfigDriveLabel(), customUserdataParamMap); + List nicProfiles = _networkOrchestrationService.getNicProfiles(nic.getVirtualMachineId(), profile.getHypervisorType()); + final Map> supportedServices = getSupportedServicesByElementForNetwork(nicProfiles); + final String isoData = ConfigDriveBuilder.buildConfigDrive( + nicProfiles, profile.getVmData(), isoFileName, profile.getConfigDriveLabel(), customUserdataParamMap, supportedServices); boolean useHostCacheOnUnsupportedPool = 
VirtualMachineManager.VmConfigDriveUseHostCacheOnUnsupportedPool.valueIn(dest.getDataCenter().getId()); boolean preferHostCache = VirtualMachineManager.VmConfigDriveForceHostCacheUse.valueIn(dest.getDataCenter().getId()); final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, dataStore.getTO(), useHostCacheOnUnsupportedPool, preferHostCache, true); @@ -758,4 +792,52 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkEle return true; } + @Override + public boolean addDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, + ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { + // Update nic profile with required information. + // Add network checks + return true; + } + + @Override + public boolean configDhcpSupportForSubnet(Network network, NicProfile nic, VirtualMachineProfile vm, + DeployDestination dest, + ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { + return false; + } + + @Override + public boolean removeDhcpSupportForSubnet(Network network) throws ResourceUnavailableException { + return true; + } + + @Override + public boolean setExtraDhcpOptions(Network network, long nicId, Map dhcpOptions) { + return false; + } + + @Override + public boolean removeDhcpEntry(Network network, NicProfile nic, + VirtualMachineProfile vmProfile) throws ResourceUnavailableException { + return true; + } + + @Override + public boolean addDnsEntry(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, + ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { + return true; + } + + @Override + public boolean configDnsSupportForSubnet(Network network, NicProfile nic, VirtualMachineProfile vm, + DeployDestination dest, 
+ ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { + return true; + } + + @Override + public boolean removeDnsSupportForSubnet(Network network) throws ResourceUnavailableException { + return true; + } } diff --git a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java index a4d48519f43..a87504cd07a 100644 --- a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java @@ -24,7 +24,9 @@ import java.util.Set; import javax.inject.Inject; +import org.apache.cloudstack.network.BgpPeer; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.ObjectUtils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; @@ -95,6 +97,7 @@ import com.cloud.network.rules.LoadBalancerContainer; import com.cloud.network.rules.PortForwardingRule; import com.cloud.network.rules.RulesManager; import com.cloud.network.rules.StaticNat; +import com.cloud.network.vpc.Vpc; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; @@ -119,7 +122,7 @@ import com.cloud.vm.dao.UserVmDao; public class VirtualRouterElement extends AdapterBase implements VirtualRouterElementService, DhcpServiceProvider, UserDataServiceProvider, SourceNatServiceProvider, StaticNatServiceProvider, FirewallServiceProvider, LoadBalancingServiceProvider, PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer, -NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServiceProvider{ +NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServiceProvider, BgpServiceProvider { protected static final Map> capabilities = 
setCapabilities(); @Inject @@ -553,7 +556,9 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ dhcpCapabilities.put(Capability.DhcpAccrossMultipleSubnets, "true"); capabilities.put(Service.Dhcp, dhcpCapabilities); - capabilities.put(Service.Gateway, null); + final Map gatewayCapabilities = new HashMap(); + gatewayCapabilities.put(Capability.RedundantRouter, "true"); + capabilities.put(Service.Gateway, gatewayCapabilities); final Map sourceNatCapabilities = new HashMap(); sourceNatCapabilities.put(Capability.SupportedSourceNatTypes, "peraccount"); @@ -1391,4 +1396,36 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ _routerDao.persist(router); } } + + @Override + public boolean applyBgpPeers(Vpc vpc, Network network, List bgpPeers) throws ResourceUnavailableException { + if (ObjectUtils.allNull(vpc, network)) { + throw new CloudRuntimeException("One of VPC and network must be passed, however both are null."); + } + final List routers; + if (vpc != null) { + routers = _routerDao.listByVpcId(vpc.getId()); + } else { + routers = _routerDao.listByNetworkAndRole(network.getId(), VirtualRouter.Role.VIRTUAL_ROUTER); + } + + if (CollectionUtils.isEmpty(routers)) { + logger.warn(String.format("Can't find at least one router for vpc %s or network %s !", vpc, network)); + return true; + } + + boolean result = true; + long dataCenterId = vpc != null ? 
vpc.getZoneId() : network.getDataCenterId(); + final DataCenterVO dcVO = _dcDao.findById(dataCenterId); + final NetworkTopology networkTopology = networkTopologyContext.retrieveNetworkTopology(dcVO); + + for (final DomainRouterVO domainRouterVO : routers) { + if (domainRouterVO.getState() != VirtualMachine.State.Running) { + continue; + } + + result = result && networkTopology.applyBgpPeers(network, bgpPeers, domainRouterVO); + } + return result; + } } diff --git a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java index 0a1114b8307..841f6221182 100644 --- a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java @@ -149,7 +149,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc public boolean implementVpc(final Vpc vpc, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - final Map params = new HashMap(1); + final Map params = new HashMap<>(1); params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); if (vpc.isRollingRestart()) { @@ -194,7 +194,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc return false; } - final Map params = new HashMap(1); + final Map params = new HashMap<>(1); params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); if (network.isRollingRestart()) { @@ -221,24 +221,58 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc return true; } - protected void configureGuestNetwork(final Network network, final List routers ) + protected boolean configureGuestNetworkForRouter(final Network network, + final DomainRouterVO router) throws ConcurrentOperationException, InsufficientCapacityException, 
ResourceUnavailableException { + if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) { + final Map paramsForRouter = new HashMap<>(1); + if (network.getState() == State.Setup) { + paramsForRouter.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); + } + if (!_vpcRouterMgr.addVpcRouterToGuestNetwork(router, network, paramsForRouter)) { + logger.error("Failed to add VPC router {} to guest network {}", router, network); + return false; + } else { + logger.debug("Successfully added VPC router {} to guest network {}", router, network); + return true; + } + } + return true; + } + + protected void configureGuestNetwork(final Network network, final List routers) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { - logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); + logger.info("Adding VPC routers to Guest Network: {} to be added!", routers.size()); - for (final DomainRouterVO router : routers) { + List backupRouters = new ArrayList<>(); + List remainingRouters = new ArrayList<>(); + for (DomainRouterVO router : routers) { if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) { - final Map paramsForRouter = new HashMap(1); - if (network.getState() == State.Setup) { - paramsForRouter.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); - } - if (!_vpcRouterMgr.addVpcRouterToGuestNetwork(router, network, paramsForRouter)) { - logger.error("Failed to add VPC router " + router + " to guest network " + network); + if (router.getRedundantState().equals(DomainRouterVO.RedundantState.BACKUP)) { + backupRouters.add(router); } else { - logger.debug("Successfully added VPC router " + router + " to guest network " + network); + remainingRouters.add(router); } } } + + for (final DomainRouterVO router : backupRouters) { + if (network.getState() != State.Setup) { + if (!_vpcRouterMgr.stopKeepAlivedOnRouter(router, network)) { + logger.error("Failed to 
stop keepalived on VPC router {} to guest network {}", router, network); + } else { + logger.debug("Successfully stopped keepalived on VPC router {} to guest network {}", router, network); + } + } + } + for (final DomainRouterVO router : remainingRouters) { + configureGuestNetworkForRouter(network, router); + } + for (final DomainRouterVO router : backupRouters) { + if (!configureGuestNetworkForRouter(network, router) && !_vpcRouterMgr.startKeepAlivedOnRouter(router, network)) { + logger.error("Failed to start keepalived on VPC router {} to guest network {}", router, network); + } + } } @Override @@ -258,7 +292,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc } if (vm.getType() == VirtualMachine.Type.User) { - final Map params = new HashMap(1); + final Map params = new HashMap<>(1); params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); final RouterDeploymentDefinition routerDeploymentDefinition = routerDeploymentDefinitionBuilder.create() @@ -283,30 +317,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc @Override public boolean shutdown(final Network network, final ReservationContext context, final boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - final Long vpcId = network.getVpcId(); - if (vpcId == null) { - logger.debug("Network " + network + " doesn't belong to any vpc, so skipping unplug nic part"); - return true; - } - - boolean success = true; - final List routers = _routerDao.listByVpcId(vpcId); - for (final VirtualRouter router : routers) { - // 1) Check if router is already a part of the network - if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) { - logger.debug("Router " + router + " is not a part the network " + network); - continue; - } - // 2) Call unplugNics in the network service - success = success && _vpcRouterMgr.removeVpcRouterFromGuestNetwork(router, network); - if (!success) { - logger.warn("Failed 
to unplug nic in network " + network + " for virtual router " + router); - } else { - logger.debug("Successfully unplugged nic in network " + network + " for virtual router " + router); - } - } - - return success; + return destroy(network, context); } @Override @@ -385,16 +396,16 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc } private static Map> setCapabilities() { - final Map> capabilities = new HashMap>(); + final Map> capabilities = new HashMap<>(); capabilities.putAll(VirtualRouterElement.capabilities); - final Map sourceNatCapabilities = new HashMap(); + final Map sourceNatCapabilities = new HashMap<>(); sourceNatCapabilities.putAll(capabilities.get(Service.SourceNat)); // TODO This kind of logic is already placed in the DB sourceNatCapabilities.put(Capability.RedundantRouter, "true"); capabilities.put(Service.SourceNat, sourceNatCapabilities); - final Map vpnCapabilities = new HashMap(); + final Map vpnCapabilities = new HashMap<>(); vpnCapabilities.putAll(capabilities.get(Service.Vpn)); vpnCapabilities.put(Capability.VpnTypes, "s2svpn"); capabilities.put(Service.Vpn, vpnCapabilities); @@ -667,7 +678,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc final NetworkTopology networkTopology = networkTopologyContext.retrieveNetworkTopology(dcVO); String[] result = null; - final List combinedResults = new ArrayList(); + final List combinedResults = new ArrayList<>(); for (final DomainRouterVO domainRouterVO : routers) { result = networkTopology.applyVpnUsers(vpn, users, domainRouterVO); combinedResults.addAll(Arrays.asList(result)); diff --git a/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java b/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java index e9a93528d05..7194b86e3e1 100644 --- a/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java +++ b/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java @@ -39,6 
+39,7 @@ import org.apache.cloudstack.api.command.user.ipv6.ListIpv6FirewallRulesCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -154,6 +155,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, List _networkAclElements; @Inject IpAddressManager _ipAddrMgr; + @Inject + RoutedIpv4Manager routedIpv4Manager; private boolean _elbEnabled = false; static Boolean rulesContinueOnErrFlag = true; @@ -202,57 +205,54 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, return createFirewallRule(sourceIpAddressId, caller, rule.getXid(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), rule.getSourceCidrList(), null, rule.getIcmpCode(), rule.getIcmpType(), null, rule.getType(), rule.getNetworkId(), rule.getTrafficType(), rule.isDisplay()); } + //Destination CIDR capability is currently implemented for egress rules only. For others, the field is passed as null. 
@DB - protected FirewallRule createFirewallRule(final Long ipAddrId, Account caller, final String xId, final Integer portStart, final Integer portEnd, - final String protocol, final List sourceCidrList, final List destCidrList, final Integer icmpCode, final Integer icmpType, final Long relatedRuleId, - final FirewallRule.FirewallRuleType type, - final Long networkId, final FirewallRule.TrafficType trafficType, final Boolean forDisplay) throws NetworkRuleConflictException { - + protected FirewallRule createFirewallRule(final Long ipAddrId, Account caller, final String xId, final Integer portStart, final Integer portEnd, final String protocol, + final List sourceCidrList, final List destCidrList, final Integer icmpCode, final Integer icmpType, final Long relatedRuleId, + final FirewallRule.FirewallRuleType type, final Long networkId, final FirewallRule.TrafficType trafficType, final Boolean forDisplay) throws NetworkRuleConflictException { IPAddressVO ipAddress = null; - if (ipAddrId != null) { - // this for ingress firewall rule, for egress id is null - ipAddress = _ipAddressDao.findById(ipAddrId); - // Validate ip address - if (ipAddress == null && type == FirewallRule.FirewallRuleType.User) { - throw new InvalidParameterValueException("Unable to create firewall rule; " + "couldn't locate IP address by id in the system"); - } - _networkModel.checkIpForService(ipAddress, Service.Firewall, null); - } + try { + // Validate ip address + if (ipAddrId != null) { + // this for ingress firewall rule, for egress id is null + ipAddress = _ipAddressDao.acquireInLockTable(ipAddrId); + if (ipAddress == null) { + throw new InvalidParameterValueException("Unable to create firewall rule; " + "couldn't locate IP address by id in the system"); + } + _networkModel.checkIpForService(ipAddress, Service.Firewall, null); + } - validateFirewallRule(caller, ipAddress, portStart, portEnd, protocol, Purpose.Firewall, type, networkId, trafficType); + validateFirewallRule(caller, ipAddress, 
portStart, portEnd, protocol, Purpose.Firewall, type, networkId, trafficType); - // icmp code and icmp type can't be passed in for any other protocol rather than icmp - if (!protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (icmpCode != null || icmpType != null)) { - throw new InvalidParameterValueException("Can specify icmpCode and icmpType for ICMP protocol only"); - } + // icmp code and icmp type can't be passed in for any other protocol rather than icmp + if (!protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (icmpCode != null || icmpType != null)) { + throw new InvalidParameterValueException("Can specify icmpCode and icmpType for ICMP protocol only"); + } - if (protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (portStart != null || portEnd != null)) { - throw new InvalidParameterValueException("Can't specify start/end port when protocol is ICMP"); - } + if (protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (portStart != null || portEnd != null)) { + throw new InvalidParameterValueException("Can't specify start/end port when protocol is ICMP"); + } - Long accountId = null; - Long domainId = null; + Long accountId = null; + Long domainId = null; - if (ipAddress != null) { - //Ingress firewall rule - accountId = ipAddress.getAllocatedToAccountId(); - domainId = ipAddress.getAllocatedInDomainId(); - } else if (networkId != null) { - //egress firewall rule + if (ipAddress != null) { + //Ingress firewall rule + accountId = ipAddress.getAllocatedToAccountId(); + domainId = ipAddress.getAllocatedInDomainId(); + } else if (networkId != null) { + //egress firewall rule Network network = _networkModel.getNetwork(networkId); accountId = network.getAccountId(); domainId = network.getDomainId(); - } + } - final Long accountIdFinal = accountId; - final Long domainIdFinal = domainId; - return Transaction.execute(new TransactionCallbackWithException() { - @Override - public FirewallRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { - 
FirewallRuleVO newRule = - new FirewallRuleVO(xId, ipAddrId, portStart, portEnd, protocol.toLowerCase(), networkId, accountIdFinal, domainIdFinal, Purpose.Firewall, - sourceCidrList, destCidrList, icmpCode, icmpType, relatedRuleId, trafficType); + final Long accountIdFinal = accountId; + final Long domainIdFinal = domainId; + return Transaction.execute((TransactionCallbackWithException) status -> { + FirewallRuleVO newRule = new FirewallRuleVO(xId, ipAddrId, portStart, portEnd, protocol.toLowerCase(), networkId, accountIdFinal, domainIdFinal, Purpose.Firewall, + sourceCidrList, destCidrList, icmpCode, icmpType, relatedRuleId, trafficType); newRule.setType(type); if (forDisplay != null) { newRule.setDisplay(forDisplay); @@ -269,8 +269,12 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, CallContext.current().putContextParameter(FirewallRule.class, newRule.getId()); return newRule; + }); + } finally { + if (ipAddrId != null) { + _ipAddressDao.releaseFromLockTable(ipAddrId); } - }); + } } @Override @@ -537,6 +541,9 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } else if (purpose == Purpose.PortForwarding) { caps = _networkModel.getNetworkServiceCapabilities(network.getId(), Service.PortForwarding); } else if (purpose == Purpose.Firewall) { + if (routedIpv4Manager.isVirtualRouterGateway(network)) { + throw new CloudRuntimeException("Unable to create routing firewall rule. 
Please use routing firewall API instead."); + } caps = _networkModel.getNetworkServiceCapabilities(network.getId(), Service.Firewall); } @@ -676,9 +683,19 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } @Override + @DB public boolean applyIngressFirewallRules(long ipId, Account caller) throws ResourceUnavailableException { - List rules = _firewallDao.listByIpAndPurpose(ipId, Purpose.Firewall); - return applyFirewallRules(rules, false, caller); + try { + IPAddressVO ipAddress = _ipAddressDao.acquireInLockTable(ipId); + if (ipAddress == null) { + logger.error(String.format("Unable to acquire lock for public IP [%s].", ipId)); + throw new CloudRuntimeException("Unable to acquire lock for public IP."); + } + List rules = _firewallDao.listByIpAndPurpose(ipId, Purpose.Firewall); + return applyFirewallRules(rules, false, caller); + } finally { + _ipAddressDao.releaseFromLockTable(ipId); + } } @Override @@ -798,16 +815,12 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, if (apply) { // ingress firewall rule if (rule.getSourceIpAddressId() != null) { - //feteches ingress firewall, ingress firewall rules associated with the ip + //fetches ingress firewall, ingress firewall rules associated with the ip List rules = _firewallDao.listByIpAndPurpose(rule.getSourceIpAddressId(), Purpose.Firewall); return applyFirewallRules(rules, false, caller); - //egress firewall rule } else if (networkId != null) { - boolean isIpv6 = Purpose.Ipv6Firewall.equals(rule.getPurpose()); - List rules = _firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Egress); - if (isIpv6) { - rules.addAll(_firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), Purpose.Ipv6Firewall, FirewallRule.TrafficType.Ingress)); - } + //egress firewall rule, or ipv4/ipv6 routing firewall rule + List rules = _firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), 
rule.getPurpose(), rule.getTrafficType()); return applyFirewallRules(rules, false, caller); } } else { diff --git a/server/src/main/java/com/cloud/network/guru/ExternalGuestNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/ExternalGuestNetworkGuru.java index bdabc6c03a1..4f76488337d 100644 --- a/server/src/main/java/com/cloud/network/guru/ExternalGuestNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/ExternalGuestNetworkGuru.java @@ -24,6 +24,8 @@ import javax.inject.Inject; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; +import org.apache.cloudstack.network.RoutedIpv4Manager; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -35,6 +37,7 @@ import com.cloud.event.EventTypes; import com.cloud.event.EventVO; import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientVirtualNetworkCapacityException; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.IpAddressManager; import com.cloud.network.Network; import com.cloud.network.Network.GuestType; @@ -87,6 +90,8 @@ public class ExternalGuestNetworkGuru extends GuestNetworkGuru { FirewallRulesDao _fwRulesDao; @Inject FirewallRulesCidrsDao _fwRulesCidrDao; + @Inject + RoutedIpv4Manager routedIpv4Manager; public ExternalGuestNetworkGuru() { super(); @@ -121,6 +126,23 @@ public class ExternalGuestNetworkGuru extends GuestNetworkGuru { /* In order to revert userSpecified network setup */ config.setState(State.Allocated); } + if (NetworkOffering.NetworkMode.ROUTED.equals(offering.getNetworkMode()) && !offering.isForVpc()) { + if (userSpecified.getCidr() != null) { + routedIpv4Manager.getOrCreateIpv4SubnetForGuestNetwork(config, userSpecified.getCidr()); + } else { + if 
(userSpecified.getNetworkCidrSize() == null) { + throw new InvalidParameterValueException("The network CIDR or CIDR size must be specified."); + } + Ipv4GuestSubnetNetworkMap subnet = routedIpv4Manager.getOrCreateIpv4SubnetForGuestNetwork(owner.getDomainId(), owner.getAccountId(), config.getDataCenterId(), userSpecified.getNetworkCidrSize()); + if (subnet != null) { + final String[] cidrTuple = subnet.getSubnet().split("\\/"); + config.setGateway(NetUtils.getIpRangeStartIpFromCidr(cidrTuple[0], Long.parseLong(cidrTuple[1]))); + config.setCidr(subnet.getSubnet()); + } else { + throw new InvalidParameterValueException("Failed to allocate a CIDR with requested size."); + } + } + } return updateNetworkDesignForIPv6IfNeeded(config, userSpecified); } diff --git a/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java index ae8ee1ef3aa..c46be9bf428 100644 --- a/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java @@ -241,7 +241,9 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur if (userSpecified.getCidr() != null) { network.setCidr(userSpecified.getCidr()); network.setGateway(userSpecified.getGateway()); - } else if (offering.getGuestType() != GuestType.L2 && (offering.getGuestType() == GuestType.Shared || !_networkModel.listNetworkOfferingServices(offering.getId()).isEmpty())) { + } else if (offering.getGuestType() != GuestType.L2 + && !NetworkOffering.NetworkMode.ROUTED.equals(offering.getNetworkMode()) + && (offering.getGuestType() == GuestType.Shared || !_networkModel.listNetworkOfferingServices(offering.getId()).isEmpty())) { final String guestNetworkCidr = dc.getGuestNetworkCidr(); if (guestNetworkCidr != null) { final String[] cidrTuple = guestNetworkCidr.split("\\/"); @@ -409,11 +411,12 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur 
if (network.getVpcId() != null) { final Vpc vpc = _vpcDao.findById(network.getVpcId()); // Redundant Networks need a guest IP that is not the same as the gateway IP. - if (_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.VPCVirtualRouter) && !vpc.isRedundant()) { + if (_networkModel.isAnyServiceSupportedInNetwork(network.getId(), Provider.VPCVirtualRouter, Service.SourceNat, Service.Gateway) + && !vpc.isRedundant()) { isGateway = true; } } else { - if (_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.VirtualRouter)) { + if (_networkModel.isAnyServiceSupportedInNetwork(network.getId(), Provider.VirtualRouter, Service.SourceNat, Service.Gateway)) { isGateway = true; } } @@ -444,6 +447,7 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur } else { guestIp = _ipAddrMgr.acquireGuestIpAddress(network, nic.getRequestedIPv4()); } + nic.setIpv4AllocationRaceCheck(true); } if (guestIp == null && network.getGuestType() != GuestType.L2 && !_networkModel.listNetworkOfferingServices(network.getNetworkOfferingId()).isEmpty()) { throw new InsufficientVirtualNetworkCapacityException("Unable to acquire Guest IP" + " address for network " + network, DataCenter.class, diff --git a/server/src/main/java/com/cloud/network/guru/PublicNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/PublicNetworkGuru.java index 1b02e145cc9..f5434388f40 100644 --- a/server/src/main/java/com/cloud/network/guru/PublicNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/PublicNetworkGuru.java @@ -129,7 +129,7 @@ public class PublicNetworkGuru extends AdapterBase implements NetworkGuru { if (vm.getType().equals(VirtualMachine.Type.ConsoleProxy) || vm.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) { forSystemVms = true; } - PublicIp ip = _ipAddrMgr.assignPublicIpAddress(dc.getId(), null, vm.getOwner(), VlanType.VirtualNetwork, null, null, false, 
forSystemVms); + PublicIp ip = _ipAddrMgr.assignPublicIpAddress(dc.getId(), null, vm.getOwner(), VlanType.VirtualNetwork, null, null, forSystemVms, forSystemVms); nic.setIPv4Address(ip.getAddress().toString()); nic.setIPv4Gateway(ip.getGateway()); nic.setIPv4Netmask(ip.getNetmask()); diff --git a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 844c3c1b997..320f064a31e 100644 --- a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -16,7 +16,6 @@ // under the License. package com.cloud.network.lb; -import java.security.InvalidParameterException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -733,7 +732,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Invalid Load balancer Id:" + cmd.getLbRuleId()); } _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, loadBalancer); @@ -790,7 +789,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); + throw new InvalidParameterValueException("Invalid Load balancer Id:" + cmd.getLbRuleId()); } _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, loadBalancer); FirewallRule.State backupState = loadBalancer.getState(); @@ -820,11 +819,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LBStickinessPolicyVO stickinessPolicy = 
_lb2stickinesspoliciesDao.findById(stickinessPolicyId); if (stickinessPolicy == null) { - throw new InvalidParameterException("Invalid Stickiness policy id value: " + stickinessPolicyId); + throw new InvalidParameterValueException("Invalid Stickiness policy id value: " + stickinessPolicyId); } LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(stickinessPolicy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer : " + stickinessPolicy.getLoadBalancerId() + " for Stickiness policy id: " + stickinessPolicyId); + throw new InvalidParameterValueException("Invalid Load balancer : " + stickinessPolicy.getLoadBalancerId() + " for Stickiness policy id: " + stickinessPolicyId); } long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); @@ -873,11 +872,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LBHealthCheckPolicyVO healthCheckPolicy = _lb2healthcheckDao.findById(healthCheckPolicyId); if (healthCheckPolicy == null) { - throw new InvalidParameterException("Invalid HealthCheck policy id value: " + healthCheckPolicyId); + throw new InvalidParameterValueException("Invalid HealthCheck policy id value: " + healthCheckPolicyId); } LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(healthCheckPolicy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer : " + healthCheckPolicy.getLoadBalancerId() + " for HealthCheck policy id: " + healthCheckPolicyId); + throw new InvalidParameterValueException("Invalid Load balancer : " + healthCheckPolicy.getLoadBalancerId() + " for HealthCheck policy id: " + healthCheckPolicyId); } final long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); @@ -1268,12 +1267,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = 
_lbDao.findById(Long.valueOf(lbRuleId)); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid load balancer id: " + lbRuleId); + throw new InvalidParameterValueException("Invalid load balancer id: " + lbRuleId); } SslCertVO certVO = _entityMgr.findById(SslCertVO.class, certId); if (certVO == null) { - throw new InvalidParameterException("Invalid certificate id: " + certId); + throw new InvalidParameterValueException("Invalid certificate id: " + certId); } _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); @@ -1332,11 +1331,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerCertMapVO lbCertMap = _lbCertMapDao.findByLbRuleId(lbRuleId); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid load balancer value: " + lbRuleId); + throw new InvalidParameterValueException("Invalid load balancer value: " + lbRuleId); } if (lbCertMap == null) { - throw new InvalidParameterException("No certificate is bound to lb with id: " + lbRuleId); + throw new InvalidParameterValueException("No certificate is bound to lb with id: " + lbRuleId); } _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); @@ -1380,7 +1379,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(loadBalancerId)); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid load balancer value: " + loadBalancerId); + throw new InvalidParameterValueException("Invalid load balancer value: " + loadBalancerId); } _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); @@ -1812,13 +1811,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } return cidr; } + @DB @Override - public LoadBalancer createPublicLoadBalancer(final String xId, final String name, final String description, final int srcPort, final int destPort, - final long 
sourceIpId, - final String protocol, final String algorithm, final boolean openFirewall, final CallContext caller, final String lbProtocol, final Boolean forDisplay, String cidrList) - throws NetworkRuleConflictException { - + public LoadBalancer createPublicLoadBalancer(final String xId, final String name, final String description, final int srcPort, final int destPort, final long sourceIpId, + final String protocol, final String algorithm, final boolean openFirewall, final CallContext caller, final String lbProtocol, + final Boolean forDisplay, String cidrList) throws NetworkRuleConflictException { if (!NetUtils.isValidPort(destPort)) { throw new InvalidParameterValueException("privatePort is an invalid value: " + destPort); } @@ -1827,55 +1825,41 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new InvalidParameterValueException("Invalid algorithm: " + algorithm); } - final IPAddressVO ipAddr = _ipAddressDao.findById(sourceIpId); - // make sure ip address exists - if (ipAddr == null || !ipAddr.readyToUse()) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule, invalid IP address id specified"); - if (ipAddr == null) { - ex.addProxyObject(String.valueOf(sourceIpId), "sourceIpId"); - } else { + try { + final IPAddressVO ipAddr = _ipAddressDao.acquireInLockTable(sourceIpId); + + // make sure ip address exists + if (ipAddr == null || !ipAddr.readyToUse()) { + InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule, invalid IP address id specified"); + if (ipAddr == null) { + ex.addProxyObject(String.valueOf(sourceIpId), "sourceIpId"); + } else { + ex.addProxyObject(ipAddr.getUuid(), "sourceIpId"); + } + throw ex; + } else if (ipAddr.isOneToOneNat()) { + InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule; specified sourceip id has static nat enabled"); 
ex.addProxyObject(ipAddr.getUuid(), "sourceIpId"); + throw ex; } - throw ex; - } else if (ipAddr.isOneToOneNat()) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to create load balancer rule; specified sourceip id has static nat enabled"); - ex.addProxyObject(ipAddr.getUuid(), "sourceIpId"); - throw ex; - } - _accountMgr.checkAccess(caller.getCallingAccount(), null, true, ipAddr); + _accountMgr.checkAccess(caller.getCallingAccount(), null, true, ipAddr); - final Long networkId = ipAddr.getAssociatedWithNetworkId(); - if (networkId == null) { - InvalidParameterValueException ex = - new InvalidParameterValueException("Unable to create load balancer rule ; specified sourceip id is not associated with any network"); - ex.addProxyObject(ipAddr.getUuid(), "sourceIpId"); - throw ex; - } + final Long networkId = ipAddr.getAssociatedWithNetworkId(); + if (networkId == null) { + InvalidParameterValueException ex = + new InvalidParameterValueException("Unable to create load balancer rule ; specified sourceip id is not associated with any network"); + ex.addProxyObject(ipAddr.getUuid(), "sourceIpId"); + throw ex; + } - // verify that lb service is supported by the network - isLbServiceSupportedInNetwork(networkId, Scheme.Public); + // verify that lb service is supported by the network + isLbServiceSupportedInNetwork(networkId, Scheme.Public); - _firewallMgr.validateFirewallRule(caller.getCallingAccount(), ipAddr, srcPort, srcPort, protocol, Purpose.LoadBalancing, FirewallRuleType.User, networkId, null); + _firewallMgr.validateFirewallRule(caller.getCallingAccount(), ipAddr, srcPort, srcPort, protocol, Purpose.LoadBalancing, FirewallRuleType.User, networkId, null); - LoadBalancerVO newRule = - new LoadBalancerVO(xId, name, description, sourceIpId, srcPort, destPort, algorithm, networkId, ipAddr.getAllocatedToAccountId(), - ipAddr.getAllocatedInDomainId(), lbProtocol, cidrList); - - // verify rule is supported by Lb provider of the network - Ip 
sourceIp = getSourceIp(newRule); - LoadBalancingRule loadBalancing = - new LoadBalancingRule(newRule, new ArrayList(), new ArrayList(), new ArrayList(), sourceIp, null, - lbProtocol); - if (!validateLbRule(loadBalancing)) { - throw new InvalidParameterValueException("LB service provider cannot support this rule"); - } - - return Transaction.execute(new TransactionCallbackWithException() { - @Override - public LoadBalancerVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { - LoadBalancerVO newRule = - new LoadBalancerVO(xId, name, description, sourceIpId, srcPort, destPort, algorithm, networkId, ipAddr.getAllocatedToAccountId(), + return Transaction.execute((TransactionCallbackWithException) status -> { + LoadBalancerVO newRule = new LoadBalancerVO(xId, name, description, sourceIpId, srcPort, destPort, algorithm, networkId, ipAddr.getAllocatedToAccountId(), ipAddr.getAllocatedInDomainId(), lbProtocol, cidrList); if (forDisplay != null) { @@ -1884,9 +1868,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // verify rule is supported by Lb provider of the network Ip sourceIp = getSourceIp(newRule); - LoadBalancingRule loadBalancing = - new LoadBalancingRule(newRule, new ArrayList(), new ArrayList(), new ArrayList(), sourceIp, - null, lbProtocol); + LoadBalancingRule loadBalancing = new LoadBalancingRule(newRule, new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), sourceIp, null, lbProtocol); if (!validateLbRule(loadBalancing)) { throw new InvalidParameterValueException("LB service provider cannot support this rule"); } @@ -1906,10 +1888,10 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + srcPort + ", private port " + destPort + - " is added successfully."); + " is added successfully."); 
CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), ipAddr.getDataCenterId(), newRule.getId(), - null, LoadBalancingRule.class.getName(), newRule.getUuid()); + null, LoadBalancingRule.class.getName(), newRule.getUuid()); return newRule; } catch (Exception e) { @@ -1924,9 +1906,10 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements removeLBRule(newRule); } } - } - }); - + }); + } finally { + _ipAddressDao.releaseFromLockTable(sourceIpId); + } } @Override @@ -2708,7 +2691,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(policy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer : " + policy.getLoadBalancerId() + " for Stickiness policy id: " + id); + throw new InvalidParameterValueException("Invalid Load balancer : " + policy.getLoadBalancerId() + " for Stickiness policy id: " + id); } _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, loadBalancer); @@ -2735,7 +2718,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancerVO loadBalancer = _lbDao.findById(Long.valueOf(policy.getLoadBalancerId())); if (loadBalancer == null) { - throw new InvalidParameterException("Invalid Load balancer : " + policy.getLoadBalancerId() + " for Stickiness policy id: " + id); + throw new InvalidParameterValueException("Invalid Load balancer : " + policy.getLoadBalancerId() + " for Stickiness policy id: " + id); } _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, loadBalancer); diff --git a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java index 69192100514..215cbb40461 100644 --- 
a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java +++ b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java @@ -32,6 +32,9 @@ import com.cloud.agent.api.HandleCksIsoCommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.BgpPeerTO; +import org.apache.cloudstack.network.dao.BgpPeerDetailsDao; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.annotation.Autowired; @@ -49,6 +52,7 @@ import com.cloud.agent.api.routing.LoadBalancerConfigCommand; import com.cloud.agent.api.routing.NetworkElementCommand; import com.cloud.agent.api.routing.RemoteAccessVpnCfgCommand; import com.cloud.agent.api.routing.SavePasswordCommand; +import com.cloud.agent.api.routing.SetBgpPeersCommand; import com.cloud.agent.api.routing.SetFirewallRulesCommand; import com.cloud.agent.api.routing.SetIpv6FirewallRulesCommand; import com.cloud.agent.api.routing.SetNetworkACLCommand; @@ -72,9 +76,11 @@ import com.cloud.agent.api.to.StaticNatRuleTO; import com.cloud.agent.manager.Commands; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; +import com.cloud.dc.ASNumberVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ASNumberDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VlanDao; import com.cloud.domain.Domain; @@ -210,6 +216,10 @@ public class CommandSetupHelper { Ipv6Service ipv6Service; @Inject VirtualRouterProviderDao vrProviderDao; + @Inject + ASNumberDao asNumberDao; + @Inject + BgpPeerDetailsDao bgpPeerDetailsDao; @Autowired @Qualifier("networkHelper") @@ -455,8 +465,12 @@ public class 
CommandSetupHelper { _rulesDao.loadSourceCidrs((FirewallRuleVO) rule); final FirewallRule.TrafficType traffictype = rule.getTrafficType(); if (traffictype == FirewallRule.TrafficType.Ingress) { - final IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); - final FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null, sourceIp.getAddress().addr(), Purpose.Firewall, traffictype); + String srcIp = null; + if (rule.getSourceIpAddressId() != null) { + final IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); + srcIp = sourceIp.getAddress().addr(); + } + final FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null, srcIp, Purpose.Firewall, traffictype); rulesTO.add(ruleTO); } else if (rule.getTrafficType() == FirewallRule.TrafficType.Egress) { final NetworkVO network = _networkDao.findById(guestNetworkId); @@ -540,8 +554,12 @@ public class CommandSetupHelper { _rulesDao.loadDestinationCidrs((FirewallRuleVO)rule); final FirewallRule.TrafficType traffictype = rule.getTrafficType(); if (traffictype == FirewallRule.TrafficType.Ingress) { - final IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); - final FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null, sourceIp.getAddress().addr(), Purpose.Firewall, traffictype); + String srcIp = null; + if (rule.getSourceIpAddressId() != null) { + final IpAddress sourceIp = _networkModel.getIp(rule.getSourceIpAddressId()); + srcIp = sourceIp.getAddress().addr(); + } + final FirewallRuleTO ruleTO = new FirewallRuleTO(rule, null, srcIp, Purpose.Firewall, traffictype); rulesTO.add(ruleTO); } else if (rule.getTrafficType() == FirewallRule.TrafficType.Egress) { final NetworkVO network = _networkDao.findById(guestNetworkId); @@ -1412,4 +1430,43 @@ public class CommandSetupHelper { command.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); cmds.addCommand("handleCksIso", command); } + + public void createBgpPeersCommands(final List bgpPeers, final VirtualRouter 
router, final Commands cmds, final Network network) { + List bgpPeerTOs = new ArrayList<>(); + + ASNumberVO asNumberVO = router.getVpcId() != null ? + asNumberDao.findByZoneAndVpcId(router.getDataCenterId(), router.getVpcId()) : + asNumberDao.findByZoneAndNetworkId(router.getDataCenterId(), network.getId()); + if (asNumberVO == null) { + logger.debug("No AS number found for the guest network or VPC, skipping."); + return; + } + + List guestNetworks = new ArrayList<>(); + if (router.getVpcId() != null) { + List networks = _networkDao.listByVpc(router.getVpcId()); + for (NetworkVO networkVO : networks) { + final NetworkOfferingVO offering = _networkOfferingDao.findByIdIncludingRemoved(networkVO.getNetworkOfferingId()); + if (NetworkOffering.RoutingMode.Dynamic.equals(offering.getRoutingMode())) { + guestNetworks.add(networkVO); + } + } + } else { + guestNetworks.add(network); + } + for (BgpPeer bgpPeer: bgpPeers) { + Map bgpPeerDetails = bgpPeerDetailsDao.getBgpPeerDetails(bgpPeer.getId()); + for (Network guestNetwork : guestNetworks) { + bgpPeerTOs.add(new BgpPeerTO(bgpPeer.getId(), bgpPeer.getIp4Address(), bgpPeer.getIp6Address(), bgpPeer.getAsNumber(), bgpPeer.getPassword(), + guestNetwork.getId(), asNumberVO.getAsNumber(), guestNetwork.getCidr(), guestNetwork.getIp6Cidr(), bgpPeerDetails)); + } + } + + final SetBgpPeersCommand cmd = new SetBgpPeersCommand(bgpPeerTOs); + cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId())); + cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); + final DataCenterVO dcVo = _dcDao.findById(router.getDataCenterId()); + cmd.setAccessDetail(NetworkElementCommand.ZONE_NETWORK_TYPE, dcVo.getNetworkType().toString()); + cmds.addCommand("bgpPeersCommand", cmd); + } } diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java 
index b9f1350615b..8f07fcfca3b 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -65,6 +65,10 @@ import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.lb.ApplicationLoadBalancerRuleVO; import org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.network.BgpPeerVO; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.apache.cloudstack.network.dao.BgpPeerDao; +import org.apache.cloudstack.network.dao.BgpPeerNetworkMapDao; import org.apache.cloudstack.network.topology.NetworkTopology; import org.apache.cloudstack.network.topology.NetworkTopologyContext; import org.apache.cloudstack.utils.CloudStackVersion; @@ -122,6 +126,7 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.VlanDao; import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlanner; import com.cloud.domain.Domain; import com.cloud.event.ActionEvent; import com.cloud.event.ActionEventUtils; @@ -340,6 +345,12 @@ Configurable, StateListener publicIps = getPublicIpsToApply(router, provider, guestNetworkId); + final ArrayList publicIps = getPublicIpsToApply(provider, guestNetworkId); final List firewallRulesEgress = new ArrayList(); final List ipv6firewallRules = new ArrayList<>(); // Fetch firewall Egress rules. 
if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.Firewall, provider)) { firewallRulesEgress.addAll(_rulesDao.listByNetworkPurposeTrafficType(guestNetworkId, Purpose.Firewall, FirewallRule.TrafficType.Egress)); - //create egress default rule for VR + // create egress default rule for VR createDefaultEgressFirewallRule(firewallRulesEgress, guestNetworkId); + // add routing ingress firewall rules which do not have public IPs + firewallRulesEgress.addAll(_rulesDao.listRoutingIngressFirewallRules(guestNetworkId)); + + // create egress default Ipv6 rules for VR createDefaultEgressIpv6FirewallRule(ipv6firewallRules, guestNetworkId); ipv6firewallRules.addAll(_rulesDao.listByNetworkPurposeTrafficType(guestNetworkId, Purpose.Ipv6Firewall, FirewallRule.TrafficType.Egress)); ipv6firewallRules.addAll(_rulesDao.listByNetworkPurposeTrafficType(guestNetworkId, Purpose.Ipv6Firewall, FirewallRule.TrafficType.Ingress)); @@ -2465,6 +2503,21 @@ Configurable, StateListener bgpPeers = bgpPeerDao.listNonRevokeByVpcId(guestNetwork.getVpcId()); + _commandSetupHelper.createBgpPeersCommands(bgpPeers, router, cmds, guestNetwork); + } + } else { + if (routedIpv4Manager.isDynamicRoutedNetwork(guestNetwork)) { + final List bgpPeers = bgpPeerDao.listNonRevokeByNetworkId(guestNetworkId); + _commandSetupHelper.createBgpPeersCommands(bgpPeers, router, cmds, guestNetwork); + } + } + if (publicIps != null && !publicIps.isEmpty()) { final List vpns = new ArrayList(); final List pfRules = new ArrayList(); @@ -2547,7 +2600,6 @@ Configurable, StateListener dhcpCapabilities = _networkSvc.getNetworkOfferingServiceCapabilities( _networkOfferingDao.findById(_networkDao.findById(guestNetworkId).getNetworkOfferingId()), Service.Dhcp); @@ -2646,7 +2698,7 @@ Configurable, StateListener vlanMacAddress) { - final ArrayList publicIps = getPublicIpsToApply(router, provider, guestNetworkId); + final ArrayList publicIps = getPublicIpsToApply(provider, guestNetworkId); if (publicIps != null && 
!publicIps.isEmpty()) { logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + router + " start."); @@ -2657,18 +2709,10 @@ Configurable, StateListener getPublicIpsToApply(final VirtualRouter router, final Provider provider, final Long guestNetworkId, + protected ArrayList getPublicIpsToApply(final Provider provider, final Long guestNetworkId, final com.cloud.network.IpAddress.State... skipInStates) { - final long ownerId = router.getAccountId(); - final List userIps; - final Network guestNetwork = _networkDao.findById(guestNetworkId); - if (guestNetwork.getGuestType() == GuestType.Shared) { - // ignore the account id for the shared network - userIps = _networkModel.listPublicIpsAssignedToGuestNtwk(guestNetworkId, null); - } else { - userIps = _networkModel.listPublicIpsAssignedToGuestNtwk(ownerId, guestNetworkId, null); - } + final List userIps = _networkModel.listPublicIpsAssignedToGuestNtwk(guestNetworkId, null); final List allPublicIps = new ArrayList(); if (userIps != null && !userIps.isEmpty()) { @@ -3008,6 +3052,14 @@ Configurable, StateListener params, DeploymentPlanner planner) + throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, + OperationTimedoutException { + _itMgr.advanceStart(vm.getUuid(), params, planner); + } + @Override public List getRoutersForNetwork(final long networkId) { final List routers = _routerDao.findByNetwork(networkId); @@ -3129,6 +3181,9 @@ Configurable, StateListener " + routerNic.getNetworkId() + ". 
It might be a problem!"); continue; } + if (routedIpv4Manager.isRoutedNetwork(network)) { + continue; + } if (forVpc && network.getTrafficType() == TrafficType.Public || !forVpc && network.getTrafficType() == TrafficType.Guest && network.getGuestType() == Network.GuestType.Isolated) { final NetworkUsageCommand usageCmd = new NetworkUsageCommand(privateIP, router.getHostName(), forVpc, routerNic.getIPv4Address()); diff --git a/server/src/main/java/com/cloud/network/router/VpcNetworkHelperImpl.java b/server/src/main/java/com/cloud/network/router/VpcNetworkHelperImpl.java index fa2f2aba8ff..13e118349b0 100644 --- a/server/src/main/java/com/cloud/network/router/VpcNetworkHelperImpl.java +++ b/server/src/main/java/com/cloud/network/router/VpcNetworkHelperImpl.java @@ -134,6 +134,7 @@ public class VpcNetworkHelperImpl extends NetworkHelperImpl { final PublicIp publicIp = PublicIp.createFromAddrAndVlan(ip, _vlanDao.findById(ip.getVlanId())); if ((ip.getState() == IpAddress.State.Allocated || ip.getState() == IpAddress.State.Allocating) && vpcMgr.isIpAllocatedToVpc(ip) + && Objects.nonNull(publicIp.getVlanTag()) && !publicVlans.contains(publicIp.getVlanTag())) { logger.debug("Allocating nic for router in vlan " + publicIp.getVlanTag()); final NicProfile publicNic = new NicProfile(); diff --git a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index f45386ca8a7..bb517eed524 100644 --- a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -30,6 +30,8 @@ import javax.naming.ConfigurationException; import com.cloud.network.dao.NetworkDao; import com.cloud.network.vpc.dao.VpcDao; +import org.apache.cloudstack.agent.routing.ManageServiceCommand; +import com.cloud.agent.api.routing.NetworkElementCommand; import 
org.apache.commons.collections.CollectionUtils; import org.springframework.stereotype.Component; @@ -231,6 +233,54 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian return result; } + @Override + public boolean stopKeepAlivedOnRouter(VirtualRouter router, + Network network) throws ConcurrentOperationException, ResourceUnavailableException { + return manageKeepalivedServiceOnRouter(router, network, "stop"); + } + + @Override + public boolean startKeepAlivedOnRouter(VirtualRouter router, + Network network) throws ConcurrentOperationException, ResourceUnavailableException { + return manageKeepalivedServiceOnRouter(router, network, "start"); + } + + private boolean manageKeepalivedServiceOnRouter(VirtualRouter router, + Network network, String action) throws ConcurrentOperationException, ResourceUnavailableException { + if (network.getTrafficType() != TrafficType.Guest) { + logger.warn("Network {} is not of type {}", network, TrafficType.Guest); + return false; + } + boolean result = true; + try { + if (router.getState() == State.Running) { + final ManageServiceCommand stopCommand = new ManageServiceCommand("keepalived", action); + stopCommand.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId())); + + final Commands cmds = new Commands(Command.OnError.Stop); + cmds.addCommand("manageKeepalived", stopCommand); + _nwHelper.sendCommandsToRouter(router, cmds); + + final Answer setupAnswer = cmds.getAnswer("manageKeepalived"); + if (!(setupAnswer != null && setupAnswer.getResult())) { + logger.warn("Unable to {} keepalived on router {}", action, router); + result = false; + } + } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { + logger.debug("Router {} is in {}, so not sending command to the backend", router.getInstanceName(), router.getState()); + } else { + String message = "Unable to " + action + " keepalived on virtual router [" + router + "] is 
not in the right state " + router.getState(); + logger.warn(message); + throw new ResourceUnavailableException(message, DataCenter.class, router.getDataCenterId()); + } + } catch (final Exception ex) { + logger.warn("Failed to {} keepalived on router {} to network {} due to {}", action, router, network, ex.getLocalizedMessage()); + logger.debug("Failed to {} keepalived on router {} to network {}", action, router, network, ex); + result = false; + } + return result; + } + protected boolean setupVpcGuestNetwork(final Network network, final VirtualRouter router, final boolean add, final NicProfile guestNic) throws ConcurrentOperationException, ResourceUnavailableException { @@ -303,6 +353,9 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian if (defaultIp6Dns2 != null) { buf.append(" ip6dns2=").append(defaultIp6Dns2); } + if (routedIpv4Manager.isRoutedVpc(vpc)) { + buf.append(" is_routed=true"); + } } } @@ -411,6 +464,9 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian domainRouterVO.getInstanceName(), domainRouterVO.getType(), details); cmds.addCommand(plugNicCmd); final VpcVO vpc = _vpcDao.findById(domainRouterVO.getVpcId()); + if (routedIpv4Manager.isRoutedVpc(vpc)) { + continue; + } final NetworkUsageCommand netUsageCmd = new NetworkUsageCommand(domainRouterVO.getPrivateIpAddress(), domainRouterVO.getInstanceName(), true, publicNic.getIPv4Address(), vpc.getCidr()); usageCmds.add(netUsageCmd); UserStatisticsVO stats = _userStatsDao.findBy(domainRouterVO.getAccountId(), domainRouterVO.getDataCenterId(), publicNtwk.getId(), publicNic.getIPv4Address(), domainRouterVO.getId(), @@ -707,7 +763,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian } if (domainRouterVO.getState() == State.Starting || domainRouterVO.getState() == State.Running) { - final ArrayList publicIps = getPublicIpsToApply(domainRouterVO, provider, guestNetworkId, IpAddress.State.Releasing); + final 
ArrayList publicIps = getPublicIpsToApply(provider, guestNetworkId, IpAddress.State.Releasing); if (publicIps != null && !publicIps.isEmpty()) { logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + domainRouterVO + " start."); diff --git a/server/src/main/java/com/cloud/network/rules/BgpPeersRules.java b/server/src/main/java/com/cloud/network/rules/BgpPeersRules.java new file mode 100644 index 00000000000..a4f780c10d3 --- /dev/null +++ b/server/src/main/java/com/cloud/network/rules/BgpPeersRules.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.network.rules; + +import java.util.List; + +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; + +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.network.router.VirtualRouter; + +public class BgpPeersRules extends RuleApplier { + + private final List bgpPeers; + + public BgpPeersRules(final List bgpPeers, final Network network) { + super(network); + this.bgpPeers = bgpPeers; + } + + public List getBgpPeers() { + return bgpPeers; + } + + @Override + public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter router) throws ResourceUnavailableException { + _router = router; + + return visitor.visit(this); + } +} diff --git a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java index 15d1db48283..55f7609f9c6 100644 --- a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java @@ -205,124 +205,122 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules final Long ipAddrId = rule.getSourceIpAddressId(); - IPAddressVO ipAddress = _ipAddressDao.findById(ipAddrId); - - // Validate ip address - if (ipAddress == null) { - throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " doesn't exist in the system"); - } else if (ipAddress.isOneToOneNat()) { - throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " has static nat enabled"); - } - - final Long networkId = rule.getNetworkId(); - Network network = _networkModel.getNetwork(networkId); - //associate ip address to network (if needed) - boolean performedIpAssoc = false; - Nic guestNic; - if (ipAddress.getAssociatedWithNetworkId() == null) { - boolean assignToVpcNtwk = 
network.getVpcId() != null && ipAddress.getVpcId() != null && ipAddress.getVpcId().longValue() == network.getVpcId(); - if (assignToVpcNtwk) { - _networkModel.checkIpForService(ipAddress, Service.PortForwarding, networkId); - - logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); - try { - ipAddress = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); - performedIpAssoc = true; - } catch (Exception ex) { - throw new CloudRuntimeException("Failed to associate ip to VPC network as " + "a part of port forwarding rule creation"); - } - } - } else { - _networkModel.checkIpForService(ipAddress, Service.PortForwarding, null); - } - - if (ipAddress.getAssociatedWithNetworkId() == null) { - throw new InvalidParameterValueException("Ip address " + ipAddress + " is not assigned to the network " + network); - } - try { - _firewallMgr.validateFirewallRule(caller, ipAddress, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), Purpose.PortForwarding, - FirewallRuleType.User, networkId, rule.getTrafficType()); + IPAddressVO ipAddress = _ipAddressDao.acquireInLockTable(ipAddrId); - final Long accountId = ipAddress.getAllocatedToAccountId(); - final Long domainId = ipAddress.getAllocatedInDomainId(); - - // start port can't be bigger than end port - if (rule.getDestinationPortStart() > rule.getDestinationPortEnd()) { - throw new InvalidParameterValueException("Start port can't be bigger than end port"); + // Validate ip address + if (ipAddress == null) { + throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " doesn't exist in the system"); + } else if (ipAddress.isOneToOneNat()) { + throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " has static nat enabled"); } - // check that the port ranges are of equal size - if ((rule.getDestinationPortEnd() - rule.getDestinationPortStart()) != 
(rule.getSourcePortEnd() - rule.getSourcePortStart())) { - throw new InvalidParameterValueException("Source port and destination port ranges should be of equal sizes."); - } + final Long networkId = rule.getNetworkId(); + Network network = _networkModel.getNetwork(networkId); + //associate ip address to network (if needed) + boolean performedIpAssoc = false; + Nic guestNic; + if (ipAddress.getAssociatedWithNetworkId() == null) { + boolean assignToVpcNtwk = network.getVpcId() != null && ipAddress.getVpcId() != null && ipAddress.getVpcId().longValue() == network.getVpcId(); + if (assignToVpcNtwk) { + _networkModel.checkIpForService(ipAddress, Service.PortForwarding, networkId); - // validate user VM exists - UserVm vm = _vmDao.findById(vmId); - if (vm == null) { - throw new InvalidParameterValueException("Unable to create port forwarding rule on address " + ipAddress + ", invalid virtual machine id specified (" + - vmId + ")."); - } else if (vm.getState() == VirtualMachine.State.Destroyed || vm.getState() == VirtualMachine.State.Expunging) { - throw new InvalidParameterValueException("Invalid user vm: " + vm.getId()); - } - - // Verify that vm has nic in the network - Ip dstIp = rule.getDestinationIpAddress(); - guestNic = _networkModel.getNicInNetwork(vmId, networkId); - if (guestNic == null || guestNic.getIPv4Address() == null) { - throw new InvalidParameterValueException("Vm doesn't belong to network associated with ipAddress"); - } else { - dstIp = new Ip(guestNic.getIPv4Address()); - } - - if (vmIp != null) { - //vm ip is passed so it can be primary or secondary ip addreess. - if (!dstIp.equals(vmIp)) { - //the vm ip is secondary ip to the nic. 
- // is vmIp is secondary ip or not - NicSecondaryIp secondaryIp = _nicSecondaryDao.findByIp4AddressAndNicId(vmIp.toString(), guestNic.getId()); - if (secondaryIp == null) { - throw new InvalidParameterValueException("IP Address is not in the VM nic's network "); + logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); + try { + ipAddress = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); + performedIpAssoc = true; + } catch (Exception ex) { + throw new CloudRuntimeException("Failed to associate ip to VPC network as " + "a part of port forwarding rule creation"); } - dstIp = vmIp; } + } else { + _networkModel.checkIpForService(ipAddress, Service.PortForwarding, null); } - //if start port and end port are passed in, and they are not equal to each other, perform the validation - boolean validatePortRange = false; - if (rule.getSourcePortStart().intValue() != rule.getSourcePortEnd().intValue() || rule.getDestinationPortStart() != rule.getDestinationPortEnd()) { - validatePortRange = true; + if (ipAddress.getAssociatedWithNetworkId() == null) { + throw new InvalidParameterValueException("Ip address " + ipAddress + " is not assigned to the network " + network); } - if (validatePortRange) { - //source start port and source dest port should be the same. 
The same applies to dest ports - if (rule.getSourcePortStart().intValue() != rule.getDestinationPortStart()) { - throw new InvalidParameterValueException("Private port start should be equal to public port start"); + try { + _firewallMgr.validateFirewallRule(caller, ipAddress, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), Purpose.PortForwarding, + FirewallRuleType.User, networkId, rule.getTrafficType()); + + final Long accountId = ipAddress.getAllocatedToAccountId(); + final Long domainId = ipAddress.getAllocatedInDomainId(); + + // start port can't be bigger than end port + if (rule.getDestinationPortStart() > rule.getDestinationPortEnd()) { + throw new InvalidParameterValueException("Start port can't be bigger than end port"); } - if (rule.getSourcePortEnd().intValue() != rule.getDestinationPortEnd()) { - throw new InvalidParameterValueException("Private port end should be equal to public port end"); + // check that the port ranges are of equal size + if ((rule.getDestinationPortEnd() - rule.getDestinationPortStart()) != (rule.getSourcePortEnd() - rule.getSourcePortStart())) { + throw new InvalidParameterValueException("Source port and destination port ranges should be of equal sizes."); } - } - final Ip dstIpFinal = dstIp; - final IPAddressVO ipAddressFinal = ipAddress; - return Transaction.execute(new TransactionCallbackWithException() { - @Override - public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + // validate user VM exists + UserVm vm = _vmDao.findById(vmId); + if (vm == null) { + throw new InvalidParameterValueException("Unable to create port forwarding rule on address " + ipAddress + ", invalid virtual machine id specified (" + + vmId + ")."); + } else if (vm.getState() == VirtualMachine.State.Destroyed || vm.getState() == VirtualMachine.State.Expunging) { + throw new InvalidParameterValueException("Invalid user vm: " + vm.getId()); + } + + // Verify that vm has nic in 
the network + Ip dstIp = rule.getDestinationIpAddress(); + guestNic = _networkModel.getNicInNetwork(vmId, networkId); + if (guestNic == null || guestNic.getIPv4Address() == null) { + throw new InvalidParameterValueException("Vm doesn't belong to network associated with ipAddress"); + } else { + dstIp = new Ip(guestNic.getIPv4Address()); + } + + if (vmIp != null) { + //vm ip is passed so it can be primary or secondary ip addreess. + if (!dstIp.equals(vmIp)) { + //the vm ip is secondary ip to the nic. + // is vmIp is secondary ip or not + NicSecondaryIp secondaryIp = _nicSecondaryDao.findByIp4AddressAndNicId(vmIp.toString(), guestNic.getId()); + if (secondaryIp == null) { + throw new InvalidParameterValueException("IP Address is not in the VM nic's network "); + } + dstIp = vmIp; + } + } + + //if start port and end port are passed in, and they are not equal to each other, perform the validation + boolean validatePortRange = false; + if (rule.getSourcePortStart().intValue() != rule.getSourcePortEnd().intValue() || rule.getDestinationPortStart() != rule.getDestinationPortEnd()) { + validatePortRange = true; + } + + if (validatePortRange) { + //source start port and source dest port should be the same. 
The same applies to dest ports + if (rule.getSourcePortStart().intValue() != rule.getDestinationPortStart()) { + throw new InvalidParameterValueException("Private port start should be equal to public port start"); + } + + if (rule.getSourcePortEnd().intValue() != rule.getDestinationPortEnd()) { + throw new InvalidParameterValueException("Private port end should be equal to public port end"); + } + } + + final Ip dstIpFinal = dstIp; + final IPAddressVO ipAddressFinal = ipAddress; + return Transaction.execute((TransactionCallbackWithException) status -> { PortForwardingRuleVO newRule = - new PortForwardingRuleVO(rule.getXid(), rule.getSourceIpAddressId(), rule.getSourcePortStart(), rule.getSourcePortEnd(), dstIpFinal, - rule.getDestinationPortStart(), rule.getDestinationPortEnd(), rule.getProtocol().toLowerCase(), networkId, accountId, domainId, vmId); + new PortForwardingRuleVO(rule.getXid(), rule.getSourceIpAddressId(), rule.getSourcePortStart(), rule.getSourcePortEnd(), dstIpFinal, + rule.getDestinationPortStart(), rule.getDestinationPortEnd(), rule.getProtocol().toLowerCase(), networkId, accountId, domainId, vmId); if (forDisplay != null) { newRule.setDisplay(forDisplay); } newRule = _portForwardingDao.persist(newRule); - // create firewallRule for 0.0.0.0/0 cidr if (openFirewall) { _firewallMgr.createRuleForAllCidrs(ipAddrId, caller, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), null, null, - newRule.getId(), networkId); + newRule.getId(), networkId); } try { @@ -332,7 +330,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } CallContext.current().setEventDetails("Rule Id: " + newRule.getId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_RULE_ADD, newRule.getAccountId(), ipAddressFinal.getDataCenterId(), newRule.getId(), null, - PortForwardingRule.class.getName(), newRule.getUuid()); + PortForwardingRule.class.getName(), newRule.getUuid()); return newRule; } catch (Exception e) { if (newRule 
!= null) { @@ -347,16 +345,17 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules throw new CloudRuntimeException("Unable to add rule for the ip id=" + ipAddrId, e); } + }); + } finally { + // release ip address if ipassoc was perfored + if (performedIpAssoc) { + //if the rule is the last one for the ip address assigned to VPC, unassign it from the network + IpAddress ip = _ipAddressDao.findById(ipAddress.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId); } - }); - - } finally { - // release ip address if ipassoc was perfored - if (performedIpAssoc) { - //if the rule is the last one for the ip address assigned to VPC, unassign it from the network - IpAddress ip = _ipAddressDao.findById(ipAddress.getId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId); } + } finally { + _ipAddressDao.releaseFromLockTable(ipAddrId); } } @@ -368,46 +367,44 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules final Long ipAddrId = rule.getSourceIpAddressId(); - IPAddressVO ipAddress = _ipAddressDao.findById(ipAddrId); + try { + IPAddressVO ipAddress = _ipAddressDao.acquireInLockTable(ipAddrId); - // Validate ip address - if (ipAddress == null) { - throw new InvalidParameterValueException("Unable to create static nat rule; ip id=" + ipAddrId + " doesn't exist in the system"); - } else if (ipAddress.isSourceNat() || !ipAddress.isOneToOneNat() || ipAddress.getAssociatedWithVmId() == null) { - throw new NetworkRuleConflictException("Can't do static nat on ip address: " + ipAddress.getAddress()); - } + // Validate ip address + if (ipAddress == null) { + throw new InvalidParameterValueException("Unable to create static nat rule; ip id=" + ipAddrId + " doesn't exist in the system"); + } else if (ipAddress.isSourceNat() || !ipAddress.isOneToOneNat() || ipAddress.getAssociatedWithVmId() == null) { + throw new NetworkRuleConflictException("Can't do static nat on ip address: " + ipAddress.getAddress()); + 
} - _firewallMgr.validateFirewallRule(caller, ipAddress, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), Purpose.StaticNat, - FirewallRuleType.User, null, rule.getTrafficType()); + _firewallMgr.validateFirewallRule(caller, ipAddress, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), Purpose.StaticNat, + FirewallRuleType.User, null, rule.getTrafficType()); - final Long networkId = ipAddress.getAssociatedWithNetworkId(); - final Long accountId = ipAddress.getAllocatedToAccountId(); - final Long domainId = ipAddress.getAllocatedInDomainId(); + final Long networkId = ipAddress.getAssociatedWithNetworkId(); + final Long accountId = ipAddress.getAllocatedToAccountId(); + final Long domainId = ipAddress.getAllocatedInDomainId(); - _networkModel.checkIpForService(ipAddress, Service.StaticNat, null); + _networkModel.checkIpForService(ipAddress, Service.StaticNat, null); - Network network = _networkModel.getNetwork(networkId); - NetworkOffering off = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); - if (off.isElasticIp()) { - throw new InvalidParameterValueException("Can't create ip forwarding rules for the network where elasticIP service is enabled"); - } - - //String dstIp = _networkModel.getIpInNetwork(ipAddress.getAssociatedWithVmId(), networkId); - final String dstIp = ipAddress.getVmIp(); - return Transaction.execute(new TransactionCallbackWithException() { - @Override - public StaticNatRule doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + Network network = _networkModel.getNetwork(networkId); + NetworkOffering off = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); + if (off.isElasticIp()) { + throw new InvalidParameterValueException("Can't create ip forwarding rules for the network where elasticIP service is enabled"); + } + //String dstIp = _networkModel.getIpInNetwork(ipAddress.getAssociatedWithVmId(), networkId); + final String dstIp 
= ipAddress.getVmIp(); + return Transaction.execute((TransactionCallbackWithException) status -> { FirewallRuleVO newRule = - new FirewallRuleVO(rule.getXid(), rule.getSourceIpAddressId(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol().toLowerCase(), - networkId, accountId, domainId, rule.getPurpose(), null, null, null, null, null); + new FirewallRuleVO(rule.getXid(), rule.getSourceIpAddressId(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol().toLowerCase(), + networkId, accountId, domainId, rule.getPurpose(), null, null, null, null, null); newRule = _firewallDao.persist(newRule); // create firewallRule for 0.0.0.0/0 cidr if (openFirewall) { _firewallMgr.createRuleForAllCidrs(ipAddrId, caller, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), null, null, - newRule.getId(), networkId); + newRule.getId(), networkId); } try { @@ -417,11 +414,9 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } CallContext.current().setEventDetails("Rule Id: " + newRule.getId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_RULE_ADD, newRule.getAccountId(), 0, newRule.getId(), null, FirewallRule.class.getName(), - newRule.getUuid()); + newRule.getUuid()); - StaticNatRule staticNatRule = new StaticNatRuleImpl(newRule, dstIp); - - return staticNatRule; + return new StaticNatRuleImpl(newRule, dstIp); } catch (Exception e) { if (newRule != null) { // no need to apply the rule as it wasn't programmed on the backend yet @@ -434,9 +429,10 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } throw new CloudRuntimeException("Unable to add static nat rule for the ip id=" + newRule.getSourceIpAddressId(), e); } - } - }); - + }); + } finally { + _ipAddressDao.releaseFromLockTable(ipAddrId); + } } @Override diff --git a/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java 
b/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java index dd0dce5e10f..54338173282 100644 --- a/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -325,8 +325,8 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ public NetworkACLItem createNetworkACLItem(CreateNetworkACLCmd createNetworkACLCmd) { Long aclId = createAclListIfNeeded(createNetworkACLCmd); - Integer sourcePortStart = createNetworkACLCmd.getSourcePortStart(); - Integer sourcePortEnd = createNetworkACLCmd.getSourcePortEnd(); + Integer sourcePortStart = createNetworkACLCmd.getPublicStartPort(); + Integer sourcePortEnd = createNetworkACLCmd.getPublicEndPort(); String protocol = createNetworkACLCmd.getProtocol(); List sourceCidrList = createNetworkACLCmd.getSourceCidrList(); Integer icmpCode = createNetworkACLCmd.getIcmpCode(); @@ -697,6 +697,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ final String trafficType = cmd.getTrafficType(); final String protocol = cmd.getProtocol(); final String action = cmd.getAction(); + final String keyword = cmd.getKeyword(); final Map tags = cmd.getTags(); final Account caller = CallContext.current().getCallingAccount(); @@ -708,6 +709,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ sb.and("trafficType", sb.entity().getTrafficType(), Op.EQ); sb.and("protocol", sb.entity().getProtocol(), Op.EQ); sb.and("action", sb.entity().getAction(), Op.EQ); + sb.and("reason", sb.entity().getReason(), Op.EQ); if (tags != null && !tags.isEmpty()) { final SearchBuilder tagSearch = _resourceTagDao.createSearchBuilder(); @@ -730,6 +732,12 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ final SearchCriteria sc = sb.create(); + if (StringUtils.isNotBlank(keyword)) { + final SearchCriteria ssc = 
_networkACLItemDao.createSearchCriteria(); + ssc.addOr("protocol", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + ssc.addOr("reason", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + sc.addAnd("acl_id", SearchCriteria.Op.SC, ssc); + } if (id != null) { sc.setParameters("id", id); } @@ -747,7 +755,6 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ if (trafficType != null) { sc.setParameters("trafficType", trafficType); } - if (aclId != null) { // Get VPC and check access final NetworkACL acl = _networkACLDao.findById(aclId); @@ -764,7 +771,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ // aclId is not specified // List permitted VPCs and filter aclItems - final List permittedAccounts = new ArrayList(); + final List permittedAccounts = new ArrayList<>(); Long domainId = cmd.getDomainId(); boolean isRecursive = cmd.isRecursive(); final String accountName = cmd.getAccountName(); @@ -780,7 +787,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ final SearchCriteria scVpc = sbVpc.create(); _accountMgr.buildACLSearchCriteria(scVpc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); final List vpcs = _vpcDao.search(scVpc, null); - final List vpcIds = new ArrayList(); + final List vpcIds = new ArrayList<>(); for (final VpcVO vpc : vpcs) { vpcIds.add(vpc.getId()); } diff --git a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java index 1872f85a26f..b956ccc16ed 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java @@ -43,12 +43,22 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import com.cloud.configuration.ConfigurationManager; +import com.cloud.configuration.ConfigurationManagerImpl; +import com.cloud.bgp.BGPService; +import com.cloud.dc.ASNumberVO; 
+import com.cloud.dc.dao.ASNumberDao; +import com.cloud.dc.Vlan; +import com.cloud.network.dao.NsxProviderDao; +import com.cloud.network.element.NsxProviderVO; +import com.cloud.resourcelimit.CheckedReservation; +import com.google.common.collect.Sets; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.vpc.CreatePrivateGatewayByAdminCmd; +import org.apache.cloudstack.api.command.admin.vpc.CreateVPCCmdByAdmin; import org.apache.cloudstack.api.command.admin.vpc.CreateVPCOfferingCmd; import org.apache.cloudstack.api.command.admin.vpc.UpdateVPCOfferingCmd; import org.apache.cloudstack.api.command.user.vpc.CreatePrivateGatewayCmd; @@ -63,7 +73,10 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.cloudstack.query.QueryService; +import org.apache.cloudstack.reservation.dao.ReservationDao; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.ObjectUtils; @@ -180,6 +193,8 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; +import static com.cloud.offering.NetworkOffering.RoutingMode.Dynamic; + public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvisioningService, VpcService { public static final String SERVICE = "service"; @@ -237,6 +252,8 @@ public class VpcManagerImpl extends ManagerBase 
implements VpcManager, VpcProvis @Inject ResourceLimitService _resourceLimitMgr; @Inject + ReservationDao reservationDao; + @Inject VpcServiceMapDao _vpcSrvcDao; @Inject DataCenterDao _dcDao; @@ -266,7 +283,15 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Qualifier("networkHelper") protected NetworkHelper networkHelper; @Inject + private BGPService bgpService; + @Inject + private ASNumberDao asNumberDao; + @Inject private VpcPrivateGatewayTransactionCallable vpcTxCallable; + @Inject + private NsxProviderDao nsxProviderDao; + @Inject + RoutedIpv4Manager routedIpv4Manager; private final ScheduledExecutorService _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("VpcChecker")); private List vpcElements = null; @@ -328,7 +353,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } createVpcOffering(VpcOffering.defaultVPCOfferingName, VpcOffering.defaultVPCOfferingName, svcProviderMap, true, State.Enabled, null, false, - false, false, false, null); + false, false, false, null, null, false); } // configure default vpc offering with Netscaler as LB Provider @@ -347,7 +372,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis svcProviderMap.put(svc, defaultProviders); } } - createVpcOffering(VpcOffering.defaultVPCNSOfferingName, VpcOffering.defaultVPCNSOfferingName, svcProviderMap, false, State.Enabled, null, false, false, false, false, null); + createVpcOffering(VpcOffering.defaultVPCNSOfferingName, VpcOffering.defaultVPCNSOfferingName, + svcProviderMap, false, State.Enabled, null, false, false, false, false, null, null, false); } @@ -368,7 +394,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } } createVpcOffering(VpcOffering.redundantVPCOfferingName, VpcOffering.redundantVPCOfferingName, svcProviderMap, true, State.Enabled, - null, false, false, true, false, null); + null, false, false, true, false, null, null, 
false); } // configure default vpc offering with NSX as network service provider in NAT mode @@ -385,7 +411,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } } createVpcOffering(VpcOffering.DEFAULT_VPC_NAT_NSX_OFFERING_NAME, VpcOffering.DEFAULT_VPC_NAT_NSX_OFFERING_NAME, svcProviderMap, false, - State.Enabled, null, false, false, false, true, NetworkOffering.NsxMode.NATTED.name()); + State.Enabled, null, false, false, false, true, NetworkOffering.NetworkMode.NATTED, null, false); } @@ -403,7 +429,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } } createVpcOffering(VpcOffering.DEFAULT_VPC_ROUTE_NSX_OFFERING_NAME, VpcOffering.DEFAULT_VPC_ROUTE_NSX_OFFERING_NAME, svcProviderMap, false, - State.Enabled, null, false, false, false, true, NetworkOffering.NsxMode.ROUTED.name()); + State.Enabled, null, false, false, false, true, NetworkOffering.NetworkMode.ROUTED, null, false); } } @@ -464,9 +490,18 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis final List domainIds = cmd.getDomainIds(); final List zoneIds = cmd.getZoneIds(); final Boolean forNsx = cmd.isForNsx(); - String nsxMode = cmd.getNsxMode(); + final String networkModeStr = cmd.getNetworkMode(); final boolean enable = cmd.getEnable(); - nsxMode = validateNsxMode(forNsx, nsxMode); + + NetworkOffering.NetworkMode networkMode = null; + if (networkModeStr != null) { + if (!EnumUtils.isValidEnum(NetworkOffering.NetworkMode.class, networkModeStr)) { + throw new InvalidParameterValueException("Invalid mode passed. 
Valid values: " + Arrays.toString(NetworkOffering.NetworkMode.values())); + } + networkMode = NetworkOffering.NetworkMode.valueOf(networkModeStr); + } + boolean specifyAsNumber = cmd.getSpecifyAsNumber(); + String routingModeString = cmd.getRoutingMode(); // check if valid domain if (CollectionUtils.isNotEmpty(cmd.getDomainIds())) { @@ -489,35 +524,31 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis _ntwkSvc.validateIfServiceOfferingIsActiveAndSystemVmTypeIsDomainRouter(serviceOfferingId); } - return createVpcOffering(vpcOfferingName, displayText, supportedServices, - serviceProviderList, serviceCapabilityList, internetProtocol, serviceOfferingId, forNsx, nsxMode, - domainIds, zoneIds, (enable ? State.Enabled : State.Disabled)); - } + NetworkOffering.RoutingMode routingMode = ConfigurationManagerImpl.verifyRoutingMode(routingModeString); - private String validateNsxMode(Boolean forNsx, String nsxMode) { - if (Boolean.TRUE.equals(forNsx)) { - if (Objects.isNull(nsxMode)) { - throw new InvalidParameterValueException("Mode for an NSX offering needs to be specified.Valid values: " + Arrays.toString(NetworkOffering.NsxMode.values())); - } - if (!EnumUtils.isValidEnum(NetworkOffering.NsxMode.class, nsxMode)) { - throw new InvalidParameterValueException("Invalid mode passed. 
Valid values: " + Arrays.toString(NetworkOffering.NsxMode.values())); - } - } else { - if (Objects.nonNull(nsxMode)) { - if (logger.isTraceEnabled()) { - logger.trace("nsxMode has is ignored for non-NSX enabled zones"); - } - nsxMode = null; - } + if (specifyAsNumber && !forNsx) { + String msg = "SpecifyAsNumber can only be true for VPC offerings for NSX"; + logger.error(msg); + throw new InvalidParameterValueException(msg); } - return nsxMode; + + if (specifyAsNumber && Dynamic != routingMode) { + String msg = "SpecifyAsNumber can only be true for Dynamic Route Mode network offerings"; + logger.error(msg); + throw new InvalidParameterValueException(msg); + } + + return createVpcOffering(vpcOfferingName, displayText, supportedServices, + serviceProviderList, serviceCapabilityList, internetProtocol, serviceOfferingId, forNsx, networkMode, + domainIds, zoneIds, (enable ? State.Enabled : State.Disabled), routingMode, specifyAsNumber); } @Override @ActionEvent(eventType = EventTypes.EVENT_VPC_OFFERING_CREATE, eventDescription = "creating vpc offering", create = true) public VpcOffering createVpcOffering(final String name, final String displayText, final List supportedServices, final Map> serviceProviders, final Map serviceCapabilityList, final NetUtils.InternetProtocol internetProtocol, final Long serviceOfferingId, - final Boolean forNsx, final String mode, List domainIds, List zoneIds, State state) { + final Boolean forNsx, final NetworkOffering.NetworkMode networkMode, List domainIds, List zoneIds, State state, + NetworkOffering.RoutingMode routingMode, boolean specifyAsNumber) { if (!Ipv6Service.Ipv6OfferingCreationEnabled.value() && !(internetProtocol == null || NetUtils.InternetProtocol.IPv4.equals(internetProtocol))) { throw new InvalidParameterValueException(String.format("Configuration %s needs to be enabled for creating IPv6 supported VPC offering", Ipv6Service.Ipv6OfferingCreationEnabled.key())); @@ -559,7 +590,7 @@ public class VpcManagerImpl extends 
ManagerBase implements VpcManager, VpcProvis } } - if (!sourceNatSvc) { + if (!NetworkOffering.NetworkMode.ROUTED.equals(networkMode) && !sourceNatSvc) { logger.debug("Automatically adding source nat service to the list of VPC services"); svcProviderMap.put(Service.SourceNat, defaultProviders); } @@ -580,6 +611,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (provider == null) { throw new InvalidParameterValueException("Invalid service provider: " + prvNameStr); } + if (NetworkOffering.NetworkMode.ROUTED.equals(networkMode) + && Arrays.asList(Service.SourceNat, Service.StaticNat, Service.Lb, Service.PortForwarding, Service.Vpn).contains(service) + && Provider.VPCVirtualRouter.equals(provider)) { + throw new InvalidParameterValueException("SourceNat/StaticNat/Lb/PortForwarding/Vpn service are not supported by VPC in ROUTED mode"); + } providers.add(provider); } @@ -592,17 +628,21 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // add gateway provider (if sourceNat provider is enabled) final Set sourceNatServiceProviders = svcProviderMap.get(Service.SourceNat); + Service redundantRouterService = Service.SourceNat; if (CollectionUtils.isNotEmpty(sourceNatServiceProviders)) { svcProviderMap.put(Service.Gateway, sourceNatServiceProviders); + } else if (NetworkOffering.NetworkMode.ROUTED.equals(networkMode)) { + svcProviderMap.put(Service.Gateway, Sets.newHashSet(Provider.VPCVirtualRouter)); + redundantRouterService = Service.Gateway; } validateConnectivtyServiceCapabilities(svcProviderMap.get(Service.Connectivity), serviceCapabilityList); final boolean supportsDistributedRouter = isVpcOfferingSupportsDistributedRouter(serviceCapabilityList); final boolean offersRegionLevelVPC = isVpcOfferingForRegionLevelVpc(serviceCapabilityList); - final boolean redundantRouter = isVpcOfferingRedundantRouter(serviceCapabilityList); + final boolean redundantRouter = 
isVpcOfferingRedundantRouter(serviceCapabilityList, redundantRouterService); final VpcOfferingVO offering = createVpcOffering(name, displayText, svcProviderMap, false, state, serviceOfferingId, supportsDistributedRouter, offersRegionLevelVPC, - redundantRouter, forNsx, mode); + redundantRouter, forNsx, networkMode, routingMode, specifyAsNumber); if (offering != null) { List detailsVO = new ArrayList<>(); @@ -630,7 +670,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @DB protected VpcOfferingVO createVpcOffering(final String name, final String displayText, final Map> svcProviderMap, final boolean isDefault, final State state, final Long serviceOfferingId, final boolean supportsDistributedRouter, final boolean offersRegionLevelVPC, - final boolean redundantRouter, Boolean forNsx, String mode) { + final boolean redundantRouter, Boolean forNsx, NetworkOffering.NetworkMode networkMode, NetworkOffering.RoutingMode routingMode, boolean specifyAsNumber) { return Transaction.execute(new TransactionCallback() { @Override @@ -642,7 +682,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis offering.setState(state); } offering.setForNsx(forNsx); - offering.setNsxMode(mode); + offering.setNetworkMode(networkMode); + offering.setSpecifyAsNumber(specifyAsNumber); + if (Objects.nonNull(routingMode)) { + offering.setRoutingMode(routingMode); + } + logger.debug("Adding vpc offering " + offering); offering = _vpcOffDao.persist(offering); // populate services and providers @@ -749,8 +794,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return findCapabilityForService(serviceCapabilitystList, Capability.DistributedRouter, Service.Connectivity); } - private boolean isVpcOfferingRedundantRouter(final Map serviceCapabilitystList) { - return findCapabilityForService(serviceCapabilitystList, Capability.RedundantRouter, Service.SourceNat); + private boolean 
isVpcOfferingRedundantRouter(final Map serviceCapabilitystList, Service redundantRouterService) { + return findCapabilityForService(serviceCapabilitystList, Capability.RedundantRouter, redundantRouterService); } @Override @@ -1086,7 +1131,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override @ActionEvent(eventType = EventTypes.EVENT_VPC_CREATE, eventDescription = "creating vpc", create = true) public Vpc createVpc(final long zoneId, final long vpcOffId, final long vpcOwnerId, final String vpcName, final String displayText, final String cidr, String networkDomain, - final String ip4Dns1, final String ip4Dns2, final String ip6Dns1, final String ip6Dns2, final Boolean displayVpc, Integer publicMtu) throws ResourceAllocationException { + final String ip4Dns1, final String ip4Dns2, final String ip6Dns1, final String ip6Dns2, final Boolean displayVpc, Integer publicMtu, + final Integer cidrSize, final Long asNumber, final List bgpPeerIds) throws ResourceAllocationException { final Account caller = CallContext.current().getCallingAccount(); final Account owner = _accountMgr.getAccount(vpcOwnerId); @@ -1115,6 +1161,21 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis throw ex; } + if (NetworkOffering.RoutingMode.Dynamic.equals(vpcOff.getRoutingMode()) && vpcOff.isSpecifyAsNumber() && asNumber == null) { + throw new InvalidParameterValueException("AS number is required for the VPC but not passed."); + } + + // Validate VPC cidr/cidrsize + validateVpcCidrSize(caller, owner.getAccountId(), vpcOff, cidr, cidrSize); + + // Validate BGP peers + if (CollectionUtils.isNotEmpty(bgpPeerIds)) { + if (!routedIpv4Manager.isDynamicRoutedVpc(vpcOff)) { + throw new InvalidParameterValueException("The VPC offering does not support Dynamic routing"); + } + routedIpv4Manager.validateBgpPeers(owner, zone.getId(), bgpPeerIds); + } + final boolean isRegionLevelVpcOff = vpcOff.isOffersRegionLevelVPC(); if 
(isRegionLevelVpcOff && networkDomain == null) { throw new InvalidParameterValueException("Network domain must be specified for region level VPC"); @@ -1156,35 +1217,112 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis checkVpcDns(vpcOff, ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2); + // validate network domain + if (!NetUtils.verifyDomainName(networkDomain)) { + throw new InvalidParameterValueException("Invalid network domain. Total length shouldn't exceed 190 chars. Each domain " + + "label must be between 1 and 63 characters long, can contain ASCII letters 'a' through 'z', " + "the digits '0' through '9', " + + "and the hyphen ('-'); can't start or end with \"-\""); + } + final boolean useDistributedRouter = vpcOff.isSupportsDistributedRouter(); final VpcVO vpc = new VpcVO(zoneId, vpcName, displayText, owner.getId(), owner.getDomainId(), vpcOffId, cidr, networkDomain, useDistributedRouter, isRegionLevelVpcOff, vpcOff.isRedundantRouter(), ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2); vpc.setPublicMtu(publicMtu); vpc.setDisplay(Boolean.TRUE.equals(displayVpc)); - return createVpc(displayVpc, vpc); + if (vpc.getCidr() == null && cidrSize != null) { + // Allocate a CIDR for VPC + Ipv4GuestSubnetNetworkMap subnet = routedIpv4Manager.getOrCreateIpv4SubnetForVpc(vpc, cidrSize); + if (subnet != null) { + vpc.setCidr(subnet.getSubnet()); + } else { + throw new CloudRuntimeException("Failed to allocate a CIDR with requested size for VPC."); + } + } + + Vpc newVpc = createVpc(displayVpc, vpc); + // assign Ipv4 subnet to Routed VPC + if (routedIpv4Manager.isRoutedVpc(vpc)) { + routedIpv4Manager.assignIpv4SubnetToVpc(newVpc); + } + if (CollectionUtils.isNotEmpty(bgpPeerIds)) { + routedIpv4Manager.persistBgpPeersForVpc(newVpc.getId(), bgpPeerIds); + } + return newVpc; + } + + private void validateVpcCidrSize(Account caller, long accountId, VpcOffering vpcOffering, String cidr, Integer cidrSize) { + if (ObjectUtils.allNull(cidr, cidrSize)) { + throw new 
InvalidParameterValueException("VPC cidr or cidr size must be specified"); + } + if (ObjectUtils.allNotNull(cidr, cidrSize)) { + throw new InvalidParameterValueException("VPC cidr and cidr size are mutually exclusive"); + } + if (routedIpv4Manager.isVpcVirtualRouterGateway(vpcOffering)) { + if (cidr != null) { + if (!_accountMgr.isRootAdmin(caller.getId())) { + throw new InvalidParameterValueException("Only root admin can set the gateway/netmask of VPC with ROUTED mode"); + } + return; + } + // verify VPC cidrsize + Integer maxCidrSize = routedIpv4Manager.RoutedVpcIPv4MaxCidrSize.valueIn(accountId); + if (cidrSize > maxCidrSize) { + throw new InvalidParameterValueException("VPC cidr size cannot be bigger than maximum cidr size " + maxCidrSize); + } + Integer minCidrSize = routedIpv4Manager.RoutedVpcIPv4MinCidrSize.valueIn(accountId); + if (cidrSize < minCidrSize) { + throw new InvalidParameterValueException("VPC cidr size cannot be smaller than minimum cidr size " + minCidrSize); + } + } else { + if (cidrSize != null) { + throw new InvalidParameterValueException("VPC cidr size is only applicable on VPC with Routed mode"); + } + } } @Override @ActionEvent(eventType = EventTypes.EVENT_VPC_CREATE, eventDescription = "creating vpc", create = true) public Vpc createVpc(CreateVPCCmd cmd) throws ResourceAllocationException { + List bgpPeerIds = (cmd instanceof CreateVPCCmdByAdmin) ? 
((CreateVPCCmdByAdmin)cmd).getBgpPeerIds() : null; Vpc vpc = createVpc(cmd.getZoneId(), cmd.getVpcOffering(), cmd.getEntityOwnerId(), cmd.getVpcName(), cmd.getDisplayText(), cmd.getCidr(), cmd.getNetworkDomain(), cmd.getIp4Dns1(), cmd.getIp4Dns2(), cmd.getIp6Dns1(), - cmd.getIp6Dns2(), cmd.isDisplay(), cmd.getPublicMtu()); + cmd.getIp6Dns2(), cmd.isDisplay(), cmd.getPublicMtu(), cmd.getCidrSize(), cmd.getAsNumber(), bgpPeerIds); String sourceNatIP = cmd.getSourceNatIP(); boolean forNsx = isVpcForNsx(vpc); - if (sourceNatIP != null || forNsx) { - if (forNsx) { - logger.info("Provided source NAT IP will be ignored in an NSX-enabled zone"); - sourceNatIP = null; + try { + if (sourceNatIP != null || forNsx) { + if (forNsx) { + logger.info("Provided source NAT IP will be ignored in an NSX-enabled zone"); + sourceNatIP = null; + } + logger.info(String.format("Trying to allocate the specified IP [%s] as the source NAT of VPC [%s].", sourceNatIP, vpc)); + allocateSourceNatIp(vpc, sourceNatIP); } - logger.info(String.format("Trying to allocate the specified IP [%s] as the source NAT of VPC [%s].", sourceNatIP, vpc)); - allocateSourceNatIp(vpc, sourceNatIP); + if (isVpcOfferingDynamicRouting(vpc)) { + bgpService.allocateASNumber(vpc.getZoneId(), cmd.getAsNumber(), null, vpc.getId()); + } + } catch (CloudRuntimeException ex) { + try { + deleteVpc(vpc.getId()); + } catch (Exception ex2) { + logger.error("Got exception when delete a VPC created just now: {}", ex2.getMessage()); + } + throw ex; } return vpc; } + private boolean isVpcOfferingDynamicRouting(Vpc vpc) { + VpcOffering vpcOffering = getVpcOffering(vpc.getVpcOfferingId()); + if (vpcOffering == null) { + logger.error(String.format("Cannot find VPC offering with ID %s", vpc.getVpcOfferingId())); + return false; + } + return NetworkOffering.RoutingMode.Dynamic == vpcOffering.getRoutingMode(); + } + private boolean isVpcForNsx(Vpc vpc) { if (vpc == null) { return false; @@ -1220,21 +1358,21 @@ public class VpcManagerImpl 
extends ManagerBase implements VpcManager, VpcProvis @DB protected Vpc createVpc(final Boolean displayVpc, final VpcVO vpc) { final String cidr = vpc.getCidr(); - // Validate CIDR - if (!NetUtils.isValidIp4Cidr(cidr)) { - throw new InvalidParameterValueException("Invalid CIDR specified " + cidr); + if (cidr != null) { + // Validate CIDR + if (!NetUtils.isValidIp4Cidr(cidr)) { + throw new InvalidParameterValueException("Invalid CIDR specified " + cidr); + } + + // cidr has to be RFC 1918 complient + if (!NetUtils.validateGuestCidr(cidr, !ConfigurationManager.AllowNonRFC1918CompliantIPs.value())) { + throw new InvalidParameterValueException("Guest Cidr " + cidr + " is not RFC1918 compliant"); + } } - // cidr has to be RFC 1918 complient - if (!NetUtils.validateGuestCidr(cidr, !ConfigurationManager.AllowNonRFC1918CompliantIPs.value())) { - throw new InvalidParameterValueException("Guest Cidr " + cidr + " is not RFC1918 compliant"); - } - - // validate network domain - if (!NetUtils.verifyDomainName(vpc.getNetworkDomain())) { - throw new InvalidParameterValueException("Invalid network domain. Total length shouldn't exceed 190 chars. 
Each domain " - + "label must be between 1 and 63 characters long, can contain ASCII letters 'a' through 'z', " + "the digits '0' through '9', " - + "and the hyphen ('-'); can't start or end with \"-\""); + // get or create Ipv4 subnet for ROUTED VPC + if (routedIpv4Manager.isRoutedVpc(vpc)) { + routedIpv4Manager.getOrCreateIpv4SubnetForVpc(vpc, cidr); } VpcVO vpcVO = Transaction.execute(new TransactionCallback() { @@ -1780,6 +1918,17 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return result; } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_VPC_CREATE, eventDescription = "creating vpc", async = true) + public void startVpc(final CreateVPCCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { + if (!cmd.isStart()) { + logger.debug("Not starting VPC as " + ApiConstants.START + "=false was passed to the API"); + return; + } + startVpc(cmd.getEntityId(), true); + } + protected boolean startVpc(final Vpc vpc, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { // deploy provider @@ -1904,7 +2053,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // 2) Only Isolated networks with Source nat service enabled can be // added to vpc - if (!guestNtwkOff.isForNsx() && !(guestNtwkOff.getGuestType() == GuestType.Isolated && supportedSvcs.contains(Service.SourceNat))) { + if (!guestNtwkOff.isForNsx() + && !(guestNtwkOff.getGuestType() == GuestType.Isolated && (supportedSvcs.contains(Service.SourceNat) || supportedSvcs.contains(Service.Gateway)))) { throw new InvalidParameterValueException("Only network offerings of type " + GuestType.Isolated + " with service " + Service.SourceNat.getName() + " are valid for vpc "); @@ -2096,8 +2246,18 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis 
_networkAclMgr.deleteNetworkACL(networkAcl); } + routedIpv4Manager.releaseBgpPeersForVpc(vpcId); + routedIpv4Manager.releaseIpv4SubnetForVpc(vpcId); + VpcVO vpc = vpcDao.findById(vpcId); annotationDao.removeByEntityType(AnnotationService.EntityType.VPC.name(), vpc.getUuid()); + + ASNumberVO asNumber = asNumberDao.findByZoneAndVpcId(vpc.getZoneId(), vpc.getId()); + if (asNumber != null) { + logger.debug(String.format("Releasing AS number %s from VPC %s", asNumber.getAsNumber(), vpc.getName())); + bgpService.releaseASNumber(vpc.getZoneId(), asNumber.getAsNumber(), true); + } + return success; } @@ -3018,10 +3178,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc); - final boolean isSourceNatFinal = isSrcNatIpRequired(vpc.getVpcOfferingId()) && getExistingSourceNatInVpc(vpc.getAccountId(), vpcId) == null; - Transaction.execute(new TransactionCallbackNoReturn() { - @Override - public void doInTransactionWithoutResult(final TransactionStatus status) { + final boolean isSourceNatFinal = isSrcNatIpRequired(vpc.getVpcOfferingId()) && getExistingSourceNatInVpc(vpc.getAccountId(), vpcId, false) == null; + try (CheckedReservation publicIpReservation = new CheckedReservation(owner, ResourceType.public_ip, 1l, reservationDao, _resourceLimitMgr)) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(final TransactionStatus status) { final IPAddressVO ip = _ipAddressDao.findById(ipId); // update ip address with networkId ip.setVpcId(vpcId); @@ -3031,8 +3192,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // mark ip as allocated _ipAddrMgr.markPublicIpAsAllocated(ip); - } - }); + } + }); + } catch (Exception e) { + logger.error("Failed to associate ip " + ipToAssoc + " to vpc " + vpc, e); + throw new CloudRuntimeException("Failed to associate ip " + ipToAssoc + " to vpc " + vpc, e); + } 
logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc); CallContext.current().putContextParameter(IpAddress.class, ipToAssoc.getUuid()); @@ -3080,8 +3245,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override public Network createVpcGuestNetwork(final long ntwkOffId, final String name, final String displayText, final String gateway, final String cidr, final String vlanId, String networkDomain, final Account owner, final Long domainId, final PhysicalNetwork pNtwk, final long zoneId, final ACLType aclType, final Boolean subdomainAccess, - final long vpcId, final Long aclId, final Account caller, final Boolean isDisplayNetworkEnabled, String externalId, String ip6Gateway, String ip6Cidr, final String ip4Dns1, final String ip4Dns2, final String ip6Dns1, final String ip6Dns2, Pair vrIfaceMTUs) throws ConcurrentOperationException, InsufficientCapacityException, - ResourceAllocationException { + final long vpcId, final Long aclId, final Account caller, final Boolean isDisplayNetworkEnabled, String externalId, String ip6Gateway, String ip6Cidr, + final String ip4Dns1, final String ip4Dns2, final String ip6Dns1, final String ip6Dns2, Pair vrIfaceMTUs, Integer networkCidrSize) + throws ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException { final Vpc vpc = getActiveVpc(vpcId); @@ -3105,7 +3271,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // 2) Create network final Network guestNetwork = _ntwkMgr.createGuestNetwork(ntwkOffId, name, displayText, gateway, cidr, vlanId, false, networkDomain, owner, domainId, pNtwk, zoneId, aclType, - subdomainAccess, vpcId, ip6Gateway, ip6Cidr, isDisplayNetworkEnabled, null, null, externalId, null, null, ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2, vrIfaceMTUs); + subdomainAccess, vpcId, ip6Gateway, ip6Cidr, isDisplayNetworkEnabled, null, null, externalId, null, null, ip4Dns1, ip4Dns2, ip6Dns1, ip6Dns2, vrIfaceMTUs, 
networkCidrSize); if (guestNetwork != null) { guestNetwork.setNetworkACLId(aclId); @@ -3114,7 +3280,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return guestNetwork; } - protected IPAddressVO getExistingSourceNatInVpc(final long ownerId, final long vpcId) { + protected IPAddressVO getExistingSourceNatInVpc(final long ownerId, final long vpcId, final boolean forNsx) { final List addrs = listPublicIpsAssignedToVpc(ownerId, true, vpcId); @@ -3125,8 +3291,16 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis // Account already has ip addresses for (final IPAddressVO addr : addrs) { if (addr.isSourceNat()) { - sourceNatIp = addr; - return sourceNatIp; + if (!forNsx) { + sourceNatIp = addr; + } else { + if (addr.isForSystemVms()) { + sourceNatIp = addr; + } + } + if (Objects.nonNull(sourceNatIp)) { + return sourceNatIp; + } } } @@ -3150,17 +3324,23 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } @Override - public PublicIp assignSourceNatIpAddressToVpc(final Account owner, final Vpc vpc) throws InsufficientAddressCapacityException, ConcurrentOperationException { + public PublicIp assignSourceNatIpAddressToVpc(final Account owner, final Vpc vpc, final Long podId) throws InsufficientAddressCapacityException, ConcurrentOperationException { final long dcId = vpc.getZoneId(); + NsxProviderVO nsxProvider = nsxProviderDao.findByZoneId(dcId); + boolean forNsx = nsxProvider != null; - final IPAddressVO sourceNatIp = getExistingSourceNatInVpc(owner.getId(), vpc.getId()); + final IPAddressVO sourceNatIp = getExistingSourceNatInVpc(owner.getId(), vpc.getId(), forNsx); PublicIp ipToReturn = null; if (sourceNatIp != null) { ipToReturn = PublicIp.createFromAddrAndVlan(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId())); } else { - ipToReturn = _ipAddrMgr.assignDedicateIpAddress(owner, null, vpc.getId(), dcId, true); + if (forNsx) { + ipToReturn = 
_ipAddrMgr.assignPublicIpAddress(dcId, podId, owner, Vlan.VlanType.VirtualNetwork, null, null, false, true); + } else { + ipToReturn = _ipAddrMgr.assignDedicateIpAddress(owner, null, vpc.getId(), dcId, true); + } } return ipToReturn; @@ -3198,8 +3378,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override public boolean isSrcNatIpRequired(long vpcOfferingId) { final Map> vpcOffSvcProvidersMap = getVpcOffSvcProvidersMap(vpcOfferingId); - return Objects.nonNull(vpcOffSvcProvidersMap.get(Network.Service.SourceNat)) && (vpcOffSvcProvidersMap.get(Network.Service.SourceNat).contains(Network.Provider.VPCVirtualRouter) || - vpcOffSvcProvidersMap.get(Service.SourceNat).contains(Provider.Nsx)); + return (Objects.nonNull(vpcOffSvcProvidersMap.get(Network.Service.SourceNat)) + && (vpcOffSvcProvidersMap.get(Network.Service.SourceNat).contains(Network.Provider.VPCVirtualRouter) + || vpcOffSvcProvidersMap.get(Service.SourceNat).contains(Provider.Nsx))) + || (Objects.nonNull(vpcOffSvcProvidersMap.get(Network.Service.Gateway)) + && vpcOffSvcProvidersMap.get(Service.Gateway).contains(Network.Provider.VPCVirtualRouter)); } /** diff --git a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 6fdf54936b0..6cef834b0f7 100644 --- a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -153,6 +153,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc return vpns; } + @Override @DB public RemoteAccessVpn createRemoteAccessVpn(final long publicIpId, String ipRange, boolean openFirewall, final Boolean forDisplay) throws NetworkRuleConflictException { @@ -170,92 +171,97 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc throw new InvalidParameterValueException("The Ip address is 
not ready to be used yet: " + ipAddr.getAddress()); } - IPAddressVO ipAddress = _ipAddressDao.findById(publicIpId); + try { + IPAddressVO ipAddress = _ipAddressDao.acquireInLockTable(publicIpId); - Long networkId = ipAddress.getAssociatedWithNetworkId(); - if (networkId != null) { - _networkMgr.checkIpForService(ipAddress, Service.Vpn, null); - } - - final Long vpcId = ipAddress.getVpcId(); - if (vpcId != null && ipAddress.isSourceNat()) { - assert networkId == null; - openFirewall = false; - } - - final boolean openFirewallFinal = openFirewall; - - if (networkId == null && vpcId == null) { - throw new InvalidParameterValueException("Unable to create remote access vpn for the ipAddress: " + ipAddr.getAddress().addr() + - " as ip is not associated with any network or VPC"); - } - - RemoteAccessVpnVO vpnVO = _remoteAccessVpnDao.findByPublicIpAddress(publicIpId); - - if (vpnVO != null) { - if (vpnVO.getState() == RemoteAccessVpn.State.Added) { - return vpnVO; + if (ipAddress == null) { + logger.error(String.format("Unable to acquire lock on public IP %s.", publicIpId)); + throw new CloudRuntimeException("Unable to acquire lock on public IP."); } - throw new InvalidParameterValueException(String.format("A remote Access VPN already exists for the public IP address [%s].", ipAddr.getAddress().toString())); - } + Long networkId = ipAddress.getAssociatedWithNetworkId(); + if (networkId != null) { + _networkMgr.checkIpForService(ipAddress, Service.Vpn, null); + } - if (ipRange == null) { - ipRange = RemoteAccessVpnClientIpRange.valueIn(ipAddr.getAccountId()); - } + final Long vpcId = ipAddress.getVpcId(); + if (vpcId != null && ipAddress.isSourceNat()) { + assert networkId == null; + openFirewall = false; + } - validateIpRange(ipRange, InvalidParameterValueException.class); + final boolean openFirewallFinal = openFirewall; - String[] range = ipRange.split("-"); + if (networkId == null && vpcId == null) { + throw new InvalidParameterValueException("Unable to create remote 
access vpn for the ipAddress: " + ipAddr.getAddress().addr() + + " as ip is not associated with any network or VPC"); + } - Pair cidr = null; + RemoteAccessVpnVO vpnVO = _remoteAccessVpnDao.findByPublicIpAddress(publicIpId); - if (networkId != null) { - long ipAddressOwner = ipAddr.getAccountId(); - vpnVO = _remoteAccessVpnDao.findByAccountAndNetwork(ipAddressOwner, networkId); if (vpnVO != null) { if (vpnVO.getState() == RemoteAccessVpn.State.Added) { return vpnVO; } - throw new InvalidParameterValueException(String.format("A remote access VPN already exists for the account [%s].", ipAddressOwner)); + throw new InvalidParameterValueException(String.format("A remote Access VPN already exists for the public IP address [%s].", ipAddr.getAddress().toString())); } - Network network = _networkMgr.getNetwork(networkId); - if (!_networkMgr.areServicesSupportedInNetwork(network.getId(), Service.Vpn)) { - throw new InvalidParameterValueException("Vpn service is not supported in network id=" + ipAddr.getAssociatedWithNetworkId()); + + if (ipRange == null) { + ipRange = RemoteAccessVpnClientIpRange.valueIn(ipAddr.getAccountId()); } - cidr = NetUtils.getCidr(network.getCidr()); - } else { - Vpc vpc = _vpcDao.findById(vpcId); - cidr = NetUtils.getCidr(vpc.getCidr()); - } - String[] guestIpRange = NetUtils.getIpRangeFromCidr(cidr.first(), cidr.second()); - if (NetUtils.ipRangesOverlap(range[0], range[1], guestIpRange[0], guestIpRange[1])) { - throw new InvalidParameterValueException("Invalid ip range: " + ipRange + " overlaps with guest ip range " + guestIpRange[0] + "-" + guestIpRange[1]); - } + validateIpRange(ipRange, InvalidParameterValueException.class); - long startIp = NetUtils.ip2Long(range[0]); - final String newIpRange = NetUtils.long2Ip(++startIp) + "-" + range[1]; - final String sharedSecret = PasswordGenerator.generatePresharedKey(_pskLength); + String[] range = ipRange.split("-"); - return Transaction.execute(new TransactionCallbackWithException() { - @Override - 
public RemoteAccessVpn doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + Pair cidr = null; + + if (networkId != null) { + long ipAddressOwner = ipAddr.getAccountId(); + vpnVO = _remoteAccessVpnDao.findByAccountAndNetwork(ipAddressOwner, networkId); + if (vpnVO != null) { + if (vpnVO.getState() == RemoteAccessVpn.State.Added) { + return vpnVO; + } + + throw new InvalidParameterValueException(String.format("A remote access VPN already exists for the account [%s].", ipAddressOwner)); + } + Network network = _networkMgr.getNetwork(networkId); + if (!_networkMgr.areServicesSupportedInNetwork(network.getId(), Service.Vpn)) { + throw new InvalidParameterValueException("Vpn service is not supported in network id=" + ipAddr.getAssociatedWithNetworkId()); + } + cidr = NetUtils.getCidr(network.getCidr()); + } else { + Vpc vpc = _vpcDao.findById(vpcId); + cidr = NetUtils.getCidr(vpc.getCidr()); + } + + String[] guestIpRange = NetUtils.getIpRangeFromCidr(cidr.first(), cidr.second()); + if (NetUtils.ipRangesOverlap(range[0], range[1], guestIpRange[0], guestIpRange[1])) { + throw new InvalidParameterValueException("Invalid ip range: " + ipRange + " overlaps with guest ip range " + guestIpRange[0] + "-" + guestIpRange[1]); + } + + long startIp = NetUtils.ip2Long(range[0]); + final String newIpRange = NetUtils.long2Ip(++startIp) + "-" + range[1]; + final String sharedSecret = PasswordGenerator.generatePresharedKey(_pskLength); + + return Transaction.execute((TransactionCallbackWithException) status -> { if (vpcId == null) { _rulesMgr.reservePorts(ipAddr, NetUtils.UDP_PROTO, Purpose.Vpn, openFirewallFinal, caller, NetUtils.VPN_PORT, NetUtils.VPN_L2TP_PORT, - NetUtils.VPN_NATT_PORT); + NetUtils.VPN_NATT_PORT); } - RemoteAccessVpnVO vpnVO = - new RemoteAccessVpnVO(ipAddr.getAccountId(), ipAddr.getDomainId(), ipAddr.getAssociatedWithNetworkId(), publicIpId, vpcId, range[0], newIpRange, - sharedSecret); + RemoteAccessVpnVO remoteAccessVpnVO = new 
RemoteAccessVpnVO(ipAddr.getAccountId(), ipAddr.getDomainId(), ipAddr.getAssociatedWithNetworkId(), + publicIpId, vpcId, range[0], newIpRange, sharedSecret); if (forDisplay != null) { - vpnVO.setDisplay(forDisplay); + remoteAccessVpnVO.setDisplay(forDisplay); } - return _remoteAccessVpnDao.persist(vpnVO); - } - }); + return _remoteAccessVpnDao.persist(remoteAccessVpnVO); + }); + } finally { + _ipAddressDao.releaseFromLockTable(publicIpId); + } } private void validateRemoteAccessVpnConfiguration() throws ConfigurationException { diff --git a/server/src/main/java/com/cloud/projects/ProjectManager.java b/server/src/main/java/com/cloud/projects/ProjectManager.java index 8615894990d..123284955fa 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManager.java +++ b/server/src/main/java/com/cloud/projects/ProjectManager.java @@ -22,11 +22,14 @@ import com.cloud.user.Account; import org.apache.cloudstack.framework.config.ConfigKey; public interface ProjectManager extends ProjectService { - public static final ConfigKey ProjectSmtpUseStartTLS = new ConfigKey("Advanced", Boolean.class, "project.smtp.useStartTLS", "false", + public static final ConfigKey ProjectSmtpUseStartTLS = new ConfigKey(ConfigKey.CATEGORY_ADVANCED, Boolean.class, "project.smtp.useStartTLS", "false", "If set to true and if we enable security via project.smtp.useAuth, this will enable StartTLS to secure the connection.", true); - public static final ConfigKey ProjectSmtpEnabledSecurityProtocols = new ConfigKey("Advanced", String.class, "project.smtp.enabledSecurityProtocols", "", - "White-space separated security protocols; ex: \"TLSv1 TLSv1.1\". Supported protocols: SSLv2Hello, SSLv3, TLSv1, TLSv1.1 and TLSv1.2", true); + public static final ConfigKey ProjectSmtpEnabledSecurityProtocols = new ConfigKey(ConfigKey.CATEGORY_ADVANCED, String.class, "project.smtp.enabledSecurityProtocols", "", + "White-space separated security protocols; ex: \"TLSv1 TLSv1.1\". 
Supported protocols: SSLv2Hello, SSLv3, TLSv1, TLSv1.1 and TLSv1.2", true, ConfigKey.Kind.WhitespaceSeparatedListWithOptions, "SSLv2Hello,SSLv3,TLSv1,TLSv1.1,TLSv1.2"); + + public static final ConfigKey ProjectSmtpUseAuth = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Boolean.class, "project.smtp.useAuth", "false", + "If true, use SMTP authentication when sending emails", false, ConfigKey.Scope.ManagementServer); boolean canAccessProjectAccount(Account caller, long accountId); diff --git a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java index cb1623b5858..16e3925330d 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java @@ -18,6 +18,7 @@ package com.cloud.projects; import java.io.UnsupportedEncodingException; import java.security.SecureRandom; +import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -35,6 +36,7 @@ import javax.inject.Inject; import javax.mail.MessagingException; import javax.naming.ConfigurationException; +import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ProjectRole; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.acl.dao.ProjectRoleDao; @@ -48,7 +50,9 @@ import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.mailing.MailAddress; import org.apache.cloudstack.utils.mailing.SMTPMailProperties; import org.apache.cloudstack.utils.mailing.SMTPMailSender; +import org.apache.cloudstack.webhook.WebhookHelper; import org.apache.commons.lang3.BooleanUtils; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -89,6 +93,7 @@ import com.cloud.user.ResourceLimitService; import com.cloud.user.User; import 
com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; @@ -163,6 +168,17 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C private String senderAddress; protected SMTPMailSender mailSender; + protected List listWebhooksForProject(Project project) { + List webhooks = new ArrayList<>(); + try { + WebhookHelper webhookService = ComponentContext.getDelegateComponentOfType(WebhookHelper.class); + webhooks = webhookService.listWebhooksByAccount(project.getProjectAccountId()); + } catch (NoSuchBeanDefinitionException ignored) { + logger.debug("No WebhookHelper bean found"); + } + return webhooks; + } + @Override public boolean configure(final String name, final Map params) throws ConfigurationException { @@ -277,16 +293,16 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C assignAccountToProject(project, ownerFinal.getId(), ProjectAccount.Role.Admin, Optional.ofNullable(finalUser).map(User::getId).orElse(null), null); - if (project != null) { - CallContext.current().setEventDetails("Project id=" + project.getId()); - CallContext.current().putContextParameter(Project.class, project.getUuid()); - } + if (project != null) { + CallContext.current().setEventDetails("Project id=" + project.getId()); + CallContext.current().putContextParameter(Project.class, project.getUuid()); + } - //Increment resource count + //Increment resource count _resourceLimitMgr.incrementResourceCount(ownerFinal.getId(), ResourceType.project); - return project; - } + return project; + } }); messageBus.publish(_name, ProjectManager.MESSAGE_CREATE_TUNGSTEN_PROJECT_EVENT, PublishScope.LOCAL, project); @@ -339,8 +355,9 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C List volumes = 
_volumeDao.findDetachedByAccount(project.getProjectAccountId()); List networks = _networkDao.listByOwner(project.getProjectAccountId()); List vpcs = _vpcMgr.getVpcsForAccount(project.getProjectAccountId()); + List webhooks = listWebhooksForProject(project); - Optional message = Stream.of(userTemplates, vmSnapshots, vms, volumes, networks, vpcs) + Optional message = Stream.of(userTemplates, vmSnapshots, vms, volumes, networks, vpcs, webhooks) .filter(entity -> !entity.isEmpty()) .map(entity -> entity.size() + " " + entity.get(0).getEntityType().getSimpleName() + " to clean up") .findFirst(); @@ -1273,7 +1290,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C } @Override - @ActionEvent(eventType = EventTypes.EVENT_PROJECT_ACTIVATE, eventDescription = "activating project") + @ActionEvent(eventType = EventTypes.EVENT_PROJECT_ACTIVATE, eventDescription = "activating project", async = true) @DB public Project activateProject(final long projectId) { Account caller = CallContext.current().getCallingAccount(); @@ -1434,7 +1451,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {ProjectSmtpEnabledSecurityProtocols, ProjectSmtpUseStartTLS}; + return new ConfigKey[] {ProjectSmtpEnabledSecurityProtocols, ProjectSmtpUseStartTLS, ProjectSmtpUseAuth}; } protected void updateProjectNameAndDisplayText(final ProjectVO project, String name, String displayText) { diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index d102470fe08..22837389620 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -38,6 +38,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import com.cloud.alert.AlertManager; +import com.cloud.cpu.CPU; import 
com.cloud.exception.StorageConflictException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostTagVO; @@ -426,6 +427,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, String url = cmd.getUrl(); final String username = cmd.getUsername(); final String password = cmd.getPassword(); + CPU.CPUArch arch = cmd.getArch(); if (url != null) { url = URLDecoder.decode(url); @@ -525,6 +527,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, cluster.setClusterType(clusterType); cluster.setAllocationState(allocationState); + cluster.setArch(arch.getType()); try { cluster = _clusterDao.persist(cluster); } catch (final Exception e) { @@ -1141,6 +1144,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, String allocationState = cmd.getAllocationState(); String managedstate = cmd.getManagedstate(); String name = cmd.getClusterName(); + CPU.CPUArch arch = cmd.getArch(); // Verify cluster information and update the cluster if needed boolean doUpdate = false; @@ -1213,6 +1217,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } + if (arch != null) { + cluster.setArch(arch.getType()); + doUpdate = true; + } + if (doUpdate) { _clusterDao.update(cluster.getId(), cluster); } @@ -2353,6 +2362,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, host.setLastPinged(System.currentTimeMillis() >> 10); host.setHostTags(hostTags, false); host.setDetails(details); + host.setArch(CPU.CPUArch.fromType(startup.getArch())); if (startup.getStorageIpAddressDeux() != null) { host.setStorageIpAddressDeux(startup.getStorageIpAddressDeux()); host.setStorageMacAddressDeux(startup.getStorageMacAddressDeux()); @@ -2716,7 +2726,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } if (existingPrivateIPs.size() == 1) { final DataCenterIpAddressVO vo = 
existingPrivateIPs.get(0); - if (vo.getInstanceId() != null) { + if (vo.getNicId() != null) { throw new IllegalArgumentException("The private ip address of the server (" + serverPrivateIP + ") is already in use in pod: " + pod.getName() + " and zone: " + dc.getName()); } @@ -2746,6 +2756,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new IllegalArgumentException("Can't add host whose hypervisor type is: " + hyType + " into cluster: " + clusterVO.getId() + " whose hypervisor type is: " + clusterVO.getHypervisorType()); } + CPU.CPUArch hostCpuArch = CPU.CPUArch.fromType(ssCmd.getCpuArch()); + if (hostCpuArch != null && clusterVO.getArch() != null && hostCpuArch != clusterVO.getArch()) { + String msg = String.format("Can't add a host whose arch is: %s into cluster of arch type: %s", + hostCpuArch.getType(), clusterVO.getArch().getType()); + logger.error(msg); + throw new IllegalArgumentException(msg); + } final Map hostDetails = ssCmd.getHostDetails(); if (hostDetails != null) { @@ -2764,6 +2781,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, host.setCaps(ssCmd.getCapabilities()); host.setCpuSockets(ssCmd.getCpuSockets()); host.setCpus(ssCmd.getCpus()); + host.setArch(hostCpuArch); host.setTotalMemory(ssCmd.getMemory()); host.setSpeed(ssCmd.getSpeed()); host.setHypervisorType(hyType); @@ -3371,6 +3389,15 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return sc.list(); } + @Override + public List listAllUpHostsInOneZoneByHypervisor(final HypervisorType type, final long dcId) { + final QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getHypervisorType(), Op.EQ, type); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); + return sc.list(); + } + @Override public List listAllUpAndEnabledHostsInOneZone(final long dcId) { final QueryBuilder sc = 
QueryBuilder.create(HostVO.class); @@ -3391,7 +3418,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public List listAvailableGPUDevice(final long hostId, final String groupName, final String vgpuType) { - final Filter searchFilter = new Filter(VGPUTypesVO.class, "remainingCapacity", false, null, null); + Filter searchFilter = new Filter(null, null); + searchFilter.addOrderBy(VGPUTypesVO.class, "remainingCapacity", false, "groupId"); final SearchCriteria sc = _gpuAvailability.create(); sc.setParameters("hostId", hostId); sc.setParameters("groupName", groupName); @@ -3400,6 +3428,26 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return _hostGpuGroupsDao.customSearch(sc, searchFilter); } + @Override + public List listAllHostsInOneZoneNotInClusterByHypervisor(final HypervisorType type, final long dcId, final long clusterId) { + final QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getHypervisorType(), Op.EQ, type); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getClusterId(), Op.NEQ, clusterId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); + return sc.list(); + } + + @Override + public List listAllHostsInOneZoneNotInClusterByHypervisors(List types, final long dcId, final long clusterId) { + final QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getHypervisorType(), Op.IN, types); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getClusterId(), Op.NEQ, clusterId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); + return sc.list(); + } + @Override public boolean isGPUDeviceAvailable(final long hostId, final String groupName, final String vgpuType) { if(!listAvailableGPUDevice(hostId, groupName, vgpuType).isEmpty()) { diff --git a/server/src/main/java/com/cloud/resourcelimit/CheckedReservation.java b/server/src/main/java/com/cloud/resourcelimit/CheckedReservation.java 
index 237e3a5585e..d66e1eb912a 100644 --- a/server/src/main/java/com/cloud/resourcelimit/CheckedReservation.java +++ b/server/src/main/java/com/cloud/resourcelimit/CheckedReservation.java @@ -62,12 +62,28 @@ public class CheckedReservation implements AutoCloseable { return String.format("%s-%s", ResourceReservation.class.getSimpleName(), type.getName()); } + private void removeAllReservations() { + if (CollectionUtils.isEmpty(reservations)) { + return; + } + CallContext.current().removeContextParameter(getContextParameterKey()); + for (ResourceReservation reservation : reservations) { + reservationDao.remove(reservation.getId()); + } + this.reservations = null; + } + protected void checkLimitAndPersistReservations(Account account, ResourceType resourceType, Long resourceId, List resourceLimitTags, Long amount) throws ResourceAllocationException { - checkLimitAndPersistReservation(account, resourceType, resourceId, null, amount); - if (CollectionUtils.isNotEmpty(resourceLimitTags)) { - for (String tag : resourceLimitTags) { - checkLimitAndPersistReservation(account, resourceType, resourceId, tag, amount); + try { + checkLimitAndPersistReservation(account, resourceType, resourceId, null, amount); + if (CollectionUtils.isNotEmpty(resourceLimitTags)) { + for (String tag : resourceLimitTags) { + checkLimitAndPersistReservation(account, resourceType, resourceId, tag, amount); + } } + } catch (ResourceAllocationException rae) { + removeAllReservations(); + throw rae; } } @@ -147,14 +163,7 @@ public class CheckedReservation implements AutoCloseable { @Override public void close() throws Exception { - if (CollectionUtils.isEmpty(reservations)) { - return; - } - CallContext.current().removeContextParameter(getContextParameterKey()); - for (ResourceReservation reservation : reservations) { - reservationDao.remove(reservation.getId()); - } - reservations = null; + removeAllReservations(); } public Account getAccount() { diff --git 
a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index 4455c472113..b59ddc029ee 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -20,6 +20,7 @@ import static com.cloud.utils.NumbersUtil.toHumanReadableSize; import java.util.ArrayList; import java.util.Arrays; +import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -219,8 +220,16 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim }); } + private void cleanupResourceReservationsForMs() { + int reservationsRemoved = reservationDao.removeByMsId(ManagementServerNode.getManagementServerId()); + if (reservationsRemoved > 0) { + logger.warn("Removed {} resource reservations for management server id {}", reservationsRemoved, ManagementServerNode.getManagementServerId()); + } + } + @Override public boolean start() { + cleanupResourceReservationsForMs(); if (ResourceCountCheckInterval.value() >= 0) { ConfigKeyScheduledExecutionWrapper runner = new ConfigKeyScheduledExecutionWrapper(_rcExecutor, new ResourceCountCheckTask(), ResourceCountCheckInterval, TimeUnit.SECONDS); runner.start(); @@ -230,6 +239,10 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim @Override public boolean stop() { + if (_rcExecutor != null) { + _rcExecutor.shutdown(); + } + cleanupResourceReservationsForMs(); return true; } @@ -288,6 +301,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim accountResourceLimitMap.put(Resource.ResourceType.memory.name(), Long.parseLong(_configDao.getValue(Config.DefaultMaxAccountMemory.key()))); accountResourceLimitMap.put(Resource.ResourceType.primary_storage.name(), Long.parseLong(_configDao.getValue(Config.DefaultMaxAccountPrimaryStorage.key()))); 
accountResourceLimitMap.put(Resource.ResourceType.secondary_storage.name(), MaxAccountSecondaryStorage.value()); + accountResourceLimitMap.put(Resource.ResourceType.project.name(), DefaultMaxAccountProjects.value()); domainResourceLimitMap.put(Resource.ResourceType.public_ip.name(), Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainPublicIPs.key()))); domainResourceLimitMap.put(Resource.ResourceType.snapshot.name(), Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainSnapshots.key()))); @@ -300,6 +314,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim domainResourceLimitMap.put(Resource.ResourceType.memory.name(), Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainMemory.key()))); domainResourceLimitMap.put(Resource.ResourceType.primary_storage.name(), Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainPrimaryStorage.key()))); domainResourceLimitMap.put(Resource.ResourceType.secondary_storage.name(), Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainSecondaryStorage.key()))); + domainResourceLimitMap.put(Resource.ResourceType.project.name(), DefaultMaxDomainProjects.value()); } catch (NumberFormatException e) { logger.error("NumberFormatException during configuration", e); throw new ConfigurationException("Configuration failed due to NumberFormatException, see log for the stacktrace"); @@ -1200,8 +1215,22 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim }); } + protected void cleanupStaleResourceReservations(final long accountId, final ResourceType type, String tag) { + Long delay = ResourceReservationCleanupDelay.value(); + if (delay == null || delay <= 0) { + return; + } + Date cleanupBefore = new Date(System.currentTimeMillis() - delay * 1000); + int rowsRemoved = reservationDao.removeStaleReservations(accountId, type, tag, cleanupBefore); + if (rowsRemoved > 0) { + logger.warn("Removed {} stale resource reservations for account {} of type {} and tag {}", + 
rowsRemoved, accountId, type, tag); + } + } + @DB protected long recalculateAccountResourceCount(final long accountId, final ResourceType type, String tag) { + cleanupStaleResourceReservations(accountId, type, tag); final Long newCount; if (type == Resource.ResourceType.user_vm) { newCount = calculateVmCountForAccount(accountId, tag); @@ -2106,10 +2135,13 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim public ConfigKey[] getConfigKeys() { return new ConfigKey[] { ResourceCountCheckInterval, + ResourceReservationCleanupDelay, MaxAccountSecondaryStorage, MaxProjectSecondaryStorage, ResourceLimitHostTags, - ResourceLimitStorageTags + ResourceLimitStorageTags, + DefaultMaxAccountProjects, + DefaultMaxDomainProjects }; } diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 81071db3810..7926498c123 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -1201,28 +1201,28 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio // Offering #9 - network offering for NSX provider - NATTED mode createAndPersistDefaultNsxOffering(NetworkOffering.DEFAULT_NAT_NSX_OFFERING, "Offering for NSX enabled networks - NAT mode", - NetworkOffering.NsxMode.NATTED, false, true); + NetworkOffering.NetworkMode.NATTED, false, true); // Offering #10 - network offering for NSX provider - ROUTED mode createAndPersistDefaultNsxOffering(NetworkOffering.DEFAULT_ROUTED_NSX_OFFERING, "Offering for NSX enabled networks - ROUTED mode", - NetworkOffering.NsxMode.ROUTED, false, true); + NetworkOffering.NetworkMode.ROUTED, false, true); // Offering #11 - network offering for NSX provider for VPCs - NATTED mode createAndPersistDefaultNsxOffering(NetworkOffering.DEFAULT_NAT_NSX_OFFERING_FOR_VPC, "Offering for NSX enabled networks on VPCs - NAT 
mode", - NetworkOffering.NsxMode.NATTED, true, true); + NetworkOffering.NetworkMode.NATTED, true, true); // Offering #12 - network offering for NSX provider for VPCs - ROUTED mode createAndPersistDefaultNsxOffering(NetworkOffering.DEFAULT_ROUTED_NSX_OFFERING_FOR_VPC, "Offering for NSX enabled networks on VPCs - ROUTED mode", - NetworkOffering.NsxMode.ROUTED, true, true); + NetworkOffering.NetworkMode.ROUTED, true, true); // Offering #13 - network offering for NSX provider for VPCs with Internal LB - NATTED mode createAndPersistDefaultNsxOffering(NetworkOffering.DEFAULT_NAT_NSX_OFFERING_FOR_VPC_WITH_ILB, "Offering for NSX enabled networks on VPCs with internal LB - NAT mode", - NetworkOffering.NsxMode.NATTED, true, false); + NetworkOffering.NetworkMode.NATTED, true, false); } }); } - private void createAndPersistDefaultNsxOffering(String name, String displayText, NetworkOffering.NsxMode nsxMode, + private void createAndPersistDefaultNsxOffering(String name, String displayText, NetworkOffering.NetworkMode networkMode, boolean forVpc, boolean publicLB) { NetworkOfferingVO defaultNatNSXNetworkOffering = new NetworkOfferingVO(name, displayText, TrafficType.Guest, false, false, null, @@ -1231,11 +1231,11 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio defaultNatNSXNetworkOffering.setPublicLb(publicLB); defaultNatNSXNetworkOffering.setInternalLb(!publicLB); defaultNatNSXNetworkOffering.setForNsx(true); - defaultNatNSXNetworkOffering.setNsxMode(nsxMode.name()); + defaultNatNSXNetworkOffering.setNetworkMode(networkMode); defaultNatNSXNetworkOffering.setState(NetworkOffering.State.Enabled); defaultNatNSXNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultNatNSXNetworkOffering); - Map serviceProviderMap = getServicesAndProvidersForNSXNetwork(nsxMode, forVpc, publicLB); + Map serviceProviderMap = getServicesAndProvidersForNSXNetwork(networkMode, forVpc, publicLB); for (Map.Entry service : serviceProviderMap.entrySet()) 
{ NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNatNSXNetworkOffering.getId(), service.getKey(), service.getValue()); @@ -1244,7 +1244,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } } - private Map getServicesAndProvidersForNSXNetwork(NetworkOffering.NsxMode nsxMode, boolean forVpc, boolean publicLB) { + private Map getServicesAndProvidersForNSXNetwork(NetworkOffering.NetworkMode networkMode, boolean forVpc, boolean publicLB) { final Map serviceProviderMap = new HashMap<>(); Provider routerProvider = forVpc ? Provider.VPCVirtualRouter : Provider.VirtualRouter; serviceProviderMap.put(Service.Dhcp, routerProvider); @@ -1255,7 +1255,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } else { serviceProviderMap.put(Service.Firewall, Provider.Nsx); } - if (nsxMode == NetworkOffering.NsxMode.NATTED) { + if (networkMode == NetworkOffering.NetworkMode.NATTED) { serviceProviderMap.put(Service.SourceNat, Provider.Nsx); serviceProviderMap.put(Service.StaticNat, Provider.Nsx); serviceProviderMap.put(Service.PortForwarding, Provider.Nsx); diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 15a52d3f750..cbde58dc721 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -17,6 +17,7 @@ package com.cloud.server; import java.lang.reflect.Field; +import java.security.cert.CertificateException; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; @@ -43,6 +44,7 @@ import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.utils.security.CertificateHelper; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker; import 
org.apache.cloudstack.affinity.AffinityGroupProcessor; @@ -66,6 +68,11 @@ import org.apache.cloudstack.api.command.admin.affinitygroup.UpdateVMAffinityGro import org.apache.cloudstack.api.command.admin.alert.GenerateAlertCmd; import org.apache.cloudstack.api.command.admin.autoscale.CreateCounterCmd; import org.apache.cloudstack.api.command.admin.autoscale.DeleteCounterCmd; +import org.apache.cloudstack.api.command.admin.bgp.CreateASNRangeCmd; +import org.apache.cloudstack.api.command.admin.bgp.DeleteASNRangeCmd; +import org.apache.cloudstack.api.command.admin.bgp.ListASNRangesCmd; +import org.apache.cloudstack.api.command.user.bgp.ListASNumbersCmd; +import org.apache.cloudstack.api.command.admin.bgp.ReleaseASNumberCmd; import org.apache.cloudstack.api.command.admin.cluster.AddClusterCmd; import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd; import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd; @@ -211,6 +218,7 @@ import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.AddImageStoreS3CMD; import org.apache.cloudstack.api.command.admin.storage.AddObjectStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -494,6 +502,7 @@ import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotFromVMSnaps import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotCmd; import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; +import 
org.apache.cloudstack.api.command.user.snapshot.ExtractSnapshotCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; import org.apache.cloudstack.api.command.user.snapshot.RevertSnapshotCmd; @@ -523,6 +532,7 @@ import org.apache.cloudstack.api.command.user.vm.AddIpToVmNicCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; +import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd; import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd; import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import org.apache.cloudstack.api.command.user.vm.ListVMsCmd; @@ -844,10 +854,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe protected StateMachine2 _stateMachine; static final String FOR_SYSTEMVMS = "forsystemvms"; - static final ConfigKey vmPasswordLength = new ConfigKey("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false); - static final ConfigKey sshKeyLength = new ConfigKey("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global); - static final ConfigKey humanReadableSizes = new ConfigKey("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global); - public static final ConfigKey customCsIdentifier = new ConfigKey("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack installation", true, ConfigKey.Scope.Global); + static final ConfigKey vmPasswordLength = new ConfigKey<>("Advanced", Integer.class, 
"vm.password.length", "6", "Specifies the length of a randomly generated password", false); + static final ConfigKey sshKeyLength = new ConfigKey<>("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global); + static final ConfigKey humanReadableSizes = new ConfigKey<>("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global); + public static final ConfigKey customCsIdentifier = new ConfigKey<>("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack installation", true, ConfigKey.Scope.Global); private static final VirtualMachine.Type []systemVmTypes = { VirtualMachine.Type.SecondaryStorageVm, VirtualMachine.Type.ConsoleProxy}; private static final List LIVE_MIGRATION_SUPPORTING_HYPERVISORS = List.of(HypervisorType.Hyperv, HypervisorType.KVM, HypervisorType.LXC, HypervisorType.Ovm, HypervisorType.Ovm3, HypervisorType.Simulator, HypervisorType.VMware, HypervisorType.XenServer); @@ -1034,7 +1044,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe protected List _planners; - private final List supportedHypervisors = new ArrayList(); + private final List supportedHypervisors = new ArrayList<>(); public List getPlanners() { return _planners; @@ -1112,7 +1122,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final String[] availableIds = TimeZone.getAvailableIDs(); - _availableIdsMap = new HashMap(availableIds.length); + _availableIdsMap = new HashMap<>(availableIds.length); for (final String id : availableIds) { _availableIdsMap.put(id, true); } @@ -1196,7 +1206,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final Account caller = getCaller(); final List ids = cmd.getIds(); boolean result = true; 
- List permittedAccountIds = new ArrayList(); + List permittedAccountIds = new ArrayList<>(); if (_accountService.isNormalUser(caller.getId()) || caller.getType() == Account.Type.PROJECT) { permittedAccountIds.add(caller.getId()); @@ -1211,8 +1221,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, false, sameOwnerEvents); if (ids != null && events.size() < ids.size()) { - result = false; - return result; + return false; } _eventDao.archiveEvents(events); return result; @@ -1223,7 +1232,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final Account caller = getCaller(); final List ids = cmd.getIds(); boolean result = true; - List permittedAccountIds = new ArrayList(); + List permittedAccountIds = new ArrayList<>(); if (_accountMgr.isNormalUser(caller.getId()) || caller.getType() == Account.Type.PROJECT) { permittedAccountIds.add(caller.getId()); @@ -1238,8 +1247,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, false, sameOwnerEvents); if (ids != null && events.size() < ids.size()) { - result = false; - return result; + return false; } for (final EventVO event : events) { _eventDao.remove(event.getId()); @@ -1322,7 +1330,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _clusterDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } private HypervisorType getHypervisorType(VMInstanceVO vm, StoragePool srcVolumePool) { @@ -1362,7 +1370,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final Pair, Integer> result = searchForServers(cmd.getStartIndex(), cmd.getPageSizeVal(), name, type, state, zoneId, pod, cluster, id, 
keyword, resourceState, haHosts, null, null); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } protected Pair> filterUefiHostsForMigration(List allHosts, List filteredHosts, VirtualMachine vm) { @@ -1596,20 +1604,17 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false); } - if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (CollectionUtils.isNotEmpty(suitableHosts)) { break; } } - // re-order hosts by priority _dpMgr.reorderHostsByPriority(plan.getHostPriorities(), suitableHosts); - if (logger.isDebugEnabled()) { - if (suitableHosts.isEmpty()) { - logger.debug("No suitable hosts found"); - } else { - logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts); - } + if (suitableHosts.isEmpty()) { + logger.warn("No suitable hosts found."); + } else { + logger.debug("Hosts having capacity and suitable for migration: {}", suitableHosts); } return new Ternary<>(otherHosts, suitableHosts, requiresStorageMotion); @@ -1660,9 +1665,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe StoragePool datastoreCluster = _poolDao.findById(srcVolumePool.getParent()); avoidPools.add(datastoreCluster); } - abstractDataStoreClustersList((List) allPools, new ArrayList()); + abstractDataStoreClustersList((List) allPools, new ArrayList<>()); abstractDataStoreClustersList((List) suitablePools, avoidPools); - return new Pair, List>(allPools, suitablePools); + return new Pair<>(allPools, suitablePools); } @Override @@ -1694,13 +1699,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } // Volume must be attached to an instance for live migration. 
- List allPools = new ArrayList(); - List suitablePools = new ArrayList(); + List allPools = new ArrayList<>(); + List suitablePools = new ArrayList<>(); // Volume must be in Ready state to be migrated. if (!Volume.State.Ready.equals(volume.getState())) { logger.info("Volume " + volume + " must be in ready state for migration."); - return new Pair, List>(allPools, suitablePools); + return new Pair<>(allPools, suitablePools); } final Long instanceId = volume.getInstanceId(); @@ -1736,7 +1741,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (!storageMotionSupported) { logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion."); - return new Pair, List>(allPools, suitablePools); + return new Pair<>(allPools, suitablePools); } } @@ -1759,7 +1764,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } removeDataStoreClusterParents((List) allPools); removeDataStoreClusterParents((List) suitablePools); - return new Pair, List>(allPools, suitablePools); + return new Pair<>(allPools, suitablePools); } private void removeDataStoreClusterParents(List storagePools) { @@ -2034,7 +2039,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2172,7 +2177,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _vlanDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2305,7 +2310,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (scope != null && !scope.isEmpty()) { // Populate values corresponding 
the resource id - final List configVOList = new ArrayList(); + final List configVOList = new ArrayList<>(); for (final ConfigurationVO param : result.first()) { final ConfigurationVO configVo = _configDao.findByName(param.getName()); if (configVo != null) { @@ -2327,10 +2332,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } - return new Pair, Integer>(configVOList, configVOList.size()); + return new Pair<>(configVOList, configVOList.size()); } - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2344,7 +2349,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _configGroupDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2556,7 +2561,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe Collections.sort(addrs, Comparator.comparing(IPAddressVO::getAddress)); List wPagination = com.cloud.utils.StringUtils.applyPagination(addrs, cmd.getStartIndex(), cmd.getPageSizeVal()); if (wPagination != null) { - return new Pair, Integer>(wPagination, addrs.size()); + return new Pair<>(wPagination, addrs.size()); } return new Pair<>(addrs, addrs.size()); } @@ -2728,7 +2733,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _guestOSCategoryDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -2781,7 +2786,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _guestOSHypervisorDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), 
result.second()); } @Override @@ -3147,7 +3152,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public Pair getVncPort(final VirtualMachine vm) { if (vm.getHostId() == null) { logger.warn("VM " + vm.getHostName() + " does not have host, return -1 for its VNC port"); - return new Pair(null, -1); + return new Pair<>(null, -1); } if (logger.isTraceEnabled()) { @@ -3161,10 +3166,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe answer = (GetVncPortAnswer)_agentMgr.easySend(vm.getHostId(), new GetVncPortCommand(vm.getId(), vm.getInstanceName())); } if (answer != null && answer.getResult()) { - return new Pair(answer.getAddress(), answer.getPort()); + return new Pair<>(answer.getAddress(), answer.getPort()); } - return new Pair(null, -1); + return new Pair<>(null, -1); } @Override @@ -3202,21 +3207,19 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe sc.addAnd("archived", SearchCriteria.Op.EQ, false); final Pair, Integer> result = _alertDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override public boolean archiveAlerts(final ArchiveAlertsCmd cmd) { final Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), null); - final boolean result = _alertDao.archiveAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId); - return result; + return _alertDao.archiveAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId); } @Override public boolean deleteAlerts(final DeleteAlertsCmd cmd) { final Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), null); - final boolean result = _alertDao.deleteAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId); - return result; + return 
_alertDao.deleteAlert(cmd.getIds(), cmd.getType(), cmd.getStartDate(), cmd.getEndDate(), zoneId); } Pair> getHostIdsForCapacityListing(Long zoneId, Long podId, Long clusterId, Integer capacityType, String tag) { @@ -3482,7 +3485,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public List> getCommands() { - final List> cmdList = new ArrayList>(); + final List> cmdList = new ArrayList<>(); cmdList.add(CreateAccountCmd.class); cmdList.add(DeleteAccountCmd.class); cmdList.add(DisableAccountCmd.class); @@ -3573,6 +3576,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(UpgradeRouterCmd.class); cmdList.add(AddSwiftCmd.class); cmdList.add(CancelPrimaryStorageMaintenanceCmd.class); + cmdList.add(ChangeStoragePoolScopeCmd.class); cmdList.add(CreateStoragePoolCmd.class); cmdList.add(DeletePoolCmd.class); cmdList.add(ListSwiftsCmd.class); @@ -3759,6 +3763,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(CreateSnapshotFromVMSnapshotCmd.class); cmdList.add(CopySnapshotCmd.class); cmdList.add(DeleteSnapshotCmd.class); + cmdList.add(ExtractSnapshotCmd.class); cmdList.add(ArchiveSnapshotCmd.class); cmdList.add(CreateSnapshotPolicyCmd.class); cmdList.add(UpdateSnapshotPolicyCmd.class); @@ -4010,7 +4015,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(CreateSecondaryStorageSelectorCmd.class); cmdList.add(UpdateSecondaryStorageSelectorCmd.class); cmdList.add(RemoveSecondaryStorageSelectorCmd.class); + cmdList.add(ListAffectedVmsForStorageScopeChangeCmd.class); + cmdList.add(CreateASNRangeCmd.class); + cmdList.add(ListASNRangesCmd.class); + cmdList.add(DeleteASNRangeCmd.class); + cmdList.add(ListASNumbersCmd.class); + cmdList.add(ReleaseASNumberCmd.class); // Out-of-band management APIs for admins cmdList.add(EnableOutOfBandManagementForHostCmd.class); @@ -4040,6 +4051,7 @@ public class 
ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(UpdateBucketCmd.class); cmdList.add(DeleteBucketCmd.class); cmdList.add(ListBucketsCmd.class); + return cmdList; } @@ -4251,7 +4263,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _vmInstanceDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } @Override @@ -4417,7 +4429,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe logger.warn("Exception whilst creating a signature:" + e); } - final ArrayList cloudParams = new ArrayList(); + final ArrayList cloudParams = new ArrayList<>(); cloudParams.add(cloudIdentifier); cloudParams.add(signature); @@ -4426,7 +4438,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public Map listCapabilities(final ListCapabilitiesCmd cmd) { - final Map capabilities = new HashMap(); + final Map capabilities = new HashMap<>(); final Account caller = getCaller(); boolean securityGroupsEnabled = false; @@ -4460,6 +4472,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final boolean allowUserViewDestroyedVM = (QueryService.AllowUserViewDestroyedVM.valueIn(caller.getId()) | _accountService.isAdmin(caller.getId())); final boolean allowUserExpungeRecoverVM = (UserVmManager.AllowUserExpungeRecoverVm.valueIn(caller.getId()) | _accountService.isAdmin(caller.getId())); final boolean allowUserExpungeRecoverVolume = (VolumeApiServiceImpl.AllowUserExpungeRecoverVolume.valueIn(caller.getId()) | _accountService.isAdmin(caller.getId())); + final boolean allowUserForceStopVM = (UserVmManager.AllowUserForceStopVm.valueIn(caller.getId()) | _accountService.isAdmin(caller.getId())); final boolean allowUserViewAllDomainAccounts = 
(QueryService.AllowUserViewAllDomainAccounts.valueIn(caller.getDomainId())); @@ -4473,6 +4486,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe regionSecondaryEnabled = true; } + final Integer fsVmMinCpu = Integer.parseInt(_configDao.getValue("sharedfsvm.min.cpu.count")); + final Integer fsVmMinRam = Integer.parseInt(_configDao.getValue("sharedfsvm.min.ram.size")); + capabilities.put("securityGroupsEnabled", securityGroupsEnabled); capabilities.put("userPublicTemplateEnabled", userPublicTemplateEnabled); capabilities.put("cloudStackVersion", getVersion()); @@ -4487,6 +4503,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe capabilities.put("allowUserExpungeRecoverVM", allowUserExpungeRecoverVM); capabilities.put("allowUserExpungeRecoverVolume", allowUserExpungeRecoverVolume); capabilities.put("allowUserViewAllDomainAccounts", allowUserViewAllDomainAccounts); + capabilities.put(ApiConstants.ALLOW_USER_FORCE_STOP_VM, allowUserForceStopVM); capabilities.put("kubernetesServiceEnabled", kubernetesServiceEnabled); capabilities.put("kubernetesClusterExperimentalFeaturesEnabled", kubernetesClusterExperimentalFeaturesEnabled); capabilities.put("customHypervisorDisplayName", HypervisorGuru.HypervisorCustomDisplayName.value()); @@ -4499,6 +4516,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe capabilities.put("apiLimitInterval", apiLimitInterval); capabilities.put("apiLimitMax", apiLimitMax); } + capabilities.put(ApiConstants.SHAREDFSVM_MIN_CPU_COUNT, fsVmMinCpu); + capabilities.put(ApiConstants.SHAREDFSVM_MIN_RAM_SIZE, fsVmMinRam); return capabilities; } @@ -4573,13 +4592,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe final String certificate = cmd.getCertificate(); final String key = cmd.getPrivateKey(); + String domainSuffix = cmd.getDomainSuffix(); - if (cmd.getPrivateKey() != null && !_ksMgr.validateCertificate(certificate, 
key, cmd.getDomainSuffix())) { - throw new InvalidParameterValueException("Failed to pass certificate validation check"); - } + validateCertificate(certificate, key, domainSuffix); if (cmd.getPrivateKey() != null) { - _ksMgr.saveCertificate(ConsoleProxyManager.CERTIFICATE_NAME, certificate, key, cmd.getDomainSuffix()); + _ksMgr.saveCertificate(ConsoleProxyManager.CERTIFICATE_NAME, certificate, key, domainSuffix); // Reboot ssvm here since private key is present - meaning server cert being passed final List alreadyRunning = _secStorageVmDao.getSecStorageVmListInStates(null, State.Running, State.Migrating, State.Starting); @@ -4596,9 +4614,27 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe + "please give a few minutes for console access and storage services service to be up and working again"; } + private void validateCertificate(String certificate, String key, String domainSuffix) { + if (key != null) { + Pair result = _ksMgr.validateCertificate(certificate, key, domainSuffix); + if (!result.first()) { + throw new InvalidParameterValueException(String.format("Failed to pass certificate validation check with error: %s", result.second())); + } + } else { + try { + logger.debug(String.format("Trying to validate the root certificate format")); + CertificateHelper.buildCertificate(certificate); + } catch (CertificateException e) { + String errorMsg = String.format("Failed to pass certificate validation check with error: Certificate validation failed due to exception: %s", e.getMessage()); + logger.error(errorMsg); + throw new InvalidParameterValueException(errorMsg); + } + } + } + @Override public List getHypervisors(final Long zoneId) { - final List result = new ArrayList(); + final List result = new ArrayList<>(); final String hypers = _configDao.getValue(Config.HypervisorList.key()); final String[] hypervisors = hypers.split(","); @@ -4810,9 +4846,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe 
final String keyword = cmd.getKeyword(); final Account caller = getCaller(); - final List permittedAccounts = new ArrayList(); + final List permittedAccounts = new ArrayList<>(); - final Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), null); + final Ternary domainIdRecursiveListProject = new Ternary<>(cmd.getDomainId(), cmd.isRecursive(), null); _accountMgr.buildACLSearchParameters(caller, null, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); final Long domainId = domainIdRecursiveListProject.first(); final Boolean isRecursive = domainIdRecursiveListProject.second(); @@ -5155,25 +5191,54 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } final Pair, Integer> result = _hypervisorCapabilitiesDao.searchAndCount(sc, searchFilter); - return new Pair, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); + } + + protected HypervisorCapabilitiesVO getHypervisorCapabilitiesForUpdate(final Long id, final String hypervisorStr, final String hypervisorVersion) { + if (id == null && StringUtils.isAllEmpty(hypervisorStr, hypervisorVersion)) { + throw new InvalidParameterValueException("Either ID or hypervisor and hypervisor version must be specified"); + } + if (id != null) { + if (!StringUtils.isAllBlank(hypervisorStr, hypervisorVersion)) { + throw new InvalidParameterValueException("ID can not be specified together with hypervisor and hypervisor version"); + } + HypervisorCapabilitiesVO hpvCapabilities = _hypervisorCapabilitiesDao.findById(id, true); + if (hpvCapabilities == null) { + final InvalidParameterValueException ex = new InvalidParameterValueException("unable to find the hypervisor capabilities for specified id"); + ex.addProxyObject(id.toString(), "Id"); + throw ex; + } + return hpvCapabilities; + } + if (StringUtils.isAnyBlank(hypervisorStr, hypervisorVersion)) { + throw new 
InvalidParameterValueException("Hypervisor and hypervisor version must be specified together"); + } + HypervisorType hypervisorType = HypervisorType.getType(hypervisorStr); + if (hypervisorType == HypervisorType.None) { + throw new InvalidParameterValueException("Invalid hypervisor specified"); + } + HypervisorCapabilitiesVO hpvCapabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(hypervisorType, hypervisorVersion); + if (hpvCapabilities == null) { + final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the hypervisor capabilities for specified hypervisor and hypervisor version"); + ex.addProxyObject(hypervisorStr, "hypervisor"); + ex.addProxyObject(hypervisorVersion, "hypervisorVersion"); + throw ex; + } + return hpvCapabilities; } @Override public HypervisorCapabilities updateHypervisorCapabilities(UpdateHypervisorCapabilitiesCmd cmd) { - final Long id = cmd.getId(); + Long id = cmd.getId(); + final String hypervisorStr = cmd.getHypervisor(); + final String hypervisorVersion = cmd.getHypervisorVersion(); final Boolean securityGroupEnabled = cmd.getSecurityGroupEnabled(); final Long maxGuestsLimit = cmd.getMaxGuestsLimit(); final Integer maxDataVolumesLimit = cmd.getMaxDataVolumesLimit(); final Boolean storageMotionSupported = cmd.getStorageMotionSupported(); final Integer maxHostsPerClusterLimit = cmd.getMaxHostsPerClusterLimit(); final Boolean vmSnapshotEnabled = cmd.getVmSnapshotEnabled(); - HypervisorCapabilitiesVO hpvCapabilities = _hypervisorCapabilitiesDao.findById(id, true); - - if (hpvCapabilities == null) { - final InvalidParameterValueException ex = new InvalidParameterValueException("unable to find the hypervisor capabilities for specified id"); - ex.addProxyObject(id.toString(), "Id"); - throw ex; - } + HypervisorCapabilitiesVO hpvCapabilities = getHypervisorCapabilitiesForUpdate(id, hypervisorStr, hypervisorVersion); final boolean updateNeeded = securityGroupEnabled != null || maxGuestsLimit 
!= null || maxDataVolumesLimit != null || storageMotionSupported != null || maxHostsPerClusterLimit != null || @@ -5181,7 +5246,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (!updateNeeded) { return hpvCapabilities; } + if (StringUtils.isNotBlank(hypervisorVersion) && !hpvCapabilities.getHypervisorVersion().equals(hypervisorVersion)) { + logger.debug(String.format("Hypervisor capabilities for hypervisor: %s and version: %s does not exist, creating a copy from the parent version: %s for update.", hypervisorStr, hypervisorVersion, hpvCapabilities.getHypervisorVersion())); + HypervisorCapabilitiesVO copy = new HypervisorCapabilitiesVO(hpvCapabilities); + copy.setHypervisorVersion(hypervisorVersion); + hpvCapabilities = _hypervisorCapabilitiesDao.persist(copy); + } + id = hpvCapabilities.getId(); hpvCapabilities = _hypervisorCapabilitiesDao.createForUpdate(id); if (securityGroupEnabled != null) { @@ -5302,7 +5374,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public List listDeploymentPlanners() { - final List plannersAvailable = new ArrayList(); + final List plannersAvailable = new ArrayList<>(); for (final DeploymentPlanner planner : _planners) { plannersAvailable.add(planner.getName()); } diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index f9ad0f51966..70959b56cfd 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.server; +import static com.cloud.configuration.ConfigurationManagerImpl.DELETE_QUERY_BATCH_SIZE; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; import java.lang.management.ManagementFactory; @@ -63,10 +64,10 @@ import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.usage.UsageUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.time.DateUtils; +import org.apache.logging.log4j.Level; import org.influxdb.BatchOptions; import org.influxdb.InfluxDB; import org.influxdb.InfluxDBFactory; @@ -113,9 +114,6 @@ import com.cloud.org.Cluster; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.serializer.GsonHelper; -import com.cloud.server.StatsCollector.AbstractStatsCollector; -import com.cloud.server.StatsCollector.AutoScaleMonitor; -import com.cloud.server.StatsCollector.StorageCollector; import com.cloud.storage.ImageStoreDetailsUtil; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; @@ -276,12 +274,9 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc private static final ConfigKey statsOutputUri = new ConfigKey<>("Advanced", String.class, "stats.output.uri", "", "URI to send StatsCollector statistics to. The collector is defined on the URI scheme. Example: graphite://graphite-hostaddress:port or influxdb://influxdb-hostaddress/dbname. Note that the port is optional, if not added the default port for the respective collector (graphite or influxdb) will be used. Additionally, the database name '/dbname' is also optional; default db name is 'cloudstack'. 
You must create and configure the database if using influxdb.", true); - protected static ConfigKey vmStatsIncrementMetrics = new ConfigKey<>("Advanced", Boolean.class, "vm.stats.increment.metrics", "true", + protected static ConfigKey vmStatsIncrementMetrics = new ConfigKey<>("Advanced", Boolean.class, "vm.stats.increment.metrics", "false", "When set to 'true', VM metrics(NetworkReadKBs, NetworkWriteKBs, DiskWriteKBs, DiskReadKBs, DiskReadIOs and DiskWriteIOs) that are collected from the hypervisor are summed before being returned." + "On the other hand, when set to 'false', the VM metrics API will just display the latest metrics collected.", true); - private static final ConfigKey VM_STATS_INCREMENT_METRICS_IN_MEMORY = new ConfigKey<>("Advanced", Boolean.class, "vm.stats.increment.metrics.in.memory", "true", - "When set to 'true', VM metrics(NetworkReadKBs, NetworkWriteKBs, DiskWriteKBs, DiskReadKBs, DiskReadIOs and DiskWriteIOs) that are collected from the hypervisor are summed and stored in memory. " - + "On the other hand, when set to 'false', the VM metrics API will just display the latest metrics collected.", true); protected static ConfigKey vmStatsMaxRetentionTime = new ConfigKey<>("Advanced", Integer.class, "vm.stats.max.retention.time", "720", "The maximum time (in minutes) for keeping VM stats records in the database. 
The VM stats cleanup process will be disabled if this is set to 0 or less than 0.", true); @@ -1673,7 +1668,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } List stores = _dataStoreMgr.listImageStores(); - ConcurrentHashMap storageStats = new ConcurrentHashMap(); + ConcurrentHashMap storageStats = new ConcurrentHashMap<>(); for (DataStore store : stores) { if (store.getUri() == null) { continue; @@ -1693,7 +1688,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc logger.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes())); } } - _storageStats = storageStats; + updateStorageStats(storageStats); ConcurrentHashMap storagePoolStats = new ConcurrentHashMap(); List storagePools = _storagePoolDao.listAll(); @@ -1719,7 +1714,8 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc pool.setCapacityBytes(capacityBytes); poolNeedsUpdating = true; } else { - logger.warn("Not setting capacity bytes, received " + ((StorageStats)answer).getCapacityBytes() + " capacity for pool ID " + poolId); + logger.warn("Not setting capacity bytes, received {} capacity for pool ID {}", + NumbersUtil.toReadableSize(((StorageStats)answer).getCapacityBytes()), poolId); } } if (((_storagePoolStats.get(poolId) != null && _storagePoolStats.get(poolId).getByteUsed() != usedBytes) @@ -1743,6 +1739,19 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc logger.error("Error trying to retrieve storage stats", t); } } + + private void updateStorageStats(ConcurrentHashMap storageStats) { + for (Long storeId : storageStats.keySet()) { + if (_storageStats.containsKey(storeId) + && (_storageStats.get(storeId).getCapacityBytes() == 0l + || _storageStats.get(storeId).getCapacityBytes() != storageStats.get(storeId).getCapacityBytes())) { + // get 
add to DB rigorously + _storageManager.updateImageStoreStatus(storeId, null, null, storageStats.get(storeId).getCapacityBytes()); + } + } + // if in _storageStats and not in storageStats it gets discarded + _storageStats = storageStats; + } } class AutoScaleMonitor extends ManagedContextRunnable { @@ -1823,16 +1832,15 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc double totalCapacity = imageStoreStats.getCapacityBytes(); double usedCapacity = imageStoreStats.getByteUsed(); double threshold = getImageStoreCapacityThreshold(); - String readableTotalCapacity = FileUtils.byteCountToDisplaySize((long) totalCapacity); - String readableUsedCapacity = FileUtils.byteCountToDisplaySize((long) usedCapacity); - - logger.debug(String.format("Verifying image storage [%s]. Capacity: total=[%s], used=[%s], threshold=[%s%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100)); + String readableTotalCapacity = NumbersUtil.toReadableSize((long) totalCapacity); + String readableUsedCapacity = NumbersUtil.toReadableSize((long) usedCapacity); + logger.printf(Level.DEBUG, "Verifying image storage [%s]. Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100); if (usedCapacity / totalCapacity <= threshold) { return true; } - logger.warn(String.format("Image storage [%s] has not enough capacity. Capacity: total=[%s], used=[%s], threshold=[%s%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100)); + logger.printf(Level.WARN, "Image storage [%s] has not enough capacity. 
Capacity: total=[%s], used=[%s], threshold=[%.2f%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100); return false; } @@ -1953,7 +1961,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc logger.trace("Removing older VM stats records."); Date now = new Date(); Date limit = DateUtils.addMinutes(now, -maxRetentionTime); - vmStatsDao.removeAllByTimestampLessThan(limit); + vmStatsDao.removeAllByTimestampLessThan(limit, DELETE_QUERY_BATCH_SIZE.value()); } /** @@ -1972,7 +1980,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc logger.trace("Removing older Volume stats records."); Date now = new Date(); Date limit = DateUtils.addMinutes(now, -maxRetentionTime); - volumeStatsDao.removeAllByTimestampLessThan(limit); + volumeStatsDao.removeAllByTimestampLessThan(limit, DELETE_QUERY_BATCH_SIZE.value()); } /** @@ -2129,7 +2137,6 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc public ConfigKey[] getConfigKeys() { return new ConfigKey[] {vmDiskStatsInterval, vmDiskStatsIntervalMin, vmNetworkStatsInterval, vmNetworkStatsIntervalMin, StatsTimeout, statsOutputUri, vmStatsIncrementMetrics, vmStatsMaxRetentionTime, vmStatsCollectUserVMOnly, vmDiskStatsRetentionEnabled, vmDiskStatsMaxRetentionTime, - VM_STATS_INCREMENT_METRICS_IN_MEMORY, MANAGEMENT_SERVER_STATUS_COLLECTION_INTERVAL, DATABASE_SERVER_STATUS_COLLECTION_INTERVAL, DATABASE_SERVER_LOAD_HISTORY_RETENTION_NUMBER}; diff --git a/server/src/main/java/com/cloud/servlet/ConsoleProxyClientParam.java b/server/src/main/java/com/cloud/servlet/ConsoleProxyClientParam.java index e23778c0b98..b416ab98288 100644 --- a/server/src/main/java/com/cloud/servlet/ConsoleProxyClientParam.java +++ b/server/src/main/java/com/cloud/servlet/ConsoleProxyClientParam.java @@ -34,7 +34,16 @@ public class ConsoleProxyClientParam { private String username; private String password; + /** + * IP that has generated the console 
endpoint + */ private String sourceIP; + + /** + * IP of the client that has connected to the console + */ + private String clientIp; + private String websocketUrl; private String sessionUuid; @@ -201,4 +210,12 @@ public class ConsoleProxyClientParam { public void setSessionUuid(String sessionUuid) { this.sessionUuid = sessionUuid; } + + public String getClientIp() { + return clientIp; + } + + public void setClientIp(String clientIp) { + this.clientIp = clientIp; + } } diff --git a/server/src/main/java/com/cloud/storage/ResizeVolumePayload.java b/server/src/main/java/com/cloud/storage/ResizeVolumePayload.java index 84dcd302bdd..32aa09be8b6 100644 --- a/server/src/main/java/com/cloud/storage/ResizeVolumePayload.java +++ b/server/src/main/java/com/cloud/storage/ResizeVolumePayload.java @@ -46,4 +46,12 @@ public class ResizeVolumePayload { this(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged); this.newDiskOfferingId = newDiskOfferingId; } + + public Long getNewDiskOfferingId() { + return newDiskOfferingId; + } + + public void setNewDiskOfferingId(Long newDiskOfferingId) { + this.newDiskOfferingId = newDiskOfferingId; + } } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 9a6f8563223..2ed6be39b54 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -54,6 +54,7 @@ import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import 
org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; @@ -62,6 +63,7 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.RemoveSecondaryStorageSelectorCmd; @@ -138,7 +140,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.time.DateUtils; import org.apache.commons.lang3.EnumUtils; -import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -234,6 +235,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; +import com.cloud.utils.StringUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -255,6 +257,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.UserVmManager; import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.VMInstanceDao; import com.google.common.collect.Sets; @@ -386,6 +389,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C ConfigDepot configDepot; @Inject 
ConfigurationDao configurationDao; + @Inject + private ImageStoreDetailsUtil imageStoreDetailsUtil; protected List _discoverers; @@ -407,7 +412,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C int _downloadUrlExpirationInterval; private long _serverId; - private final Map hostListeners = new HashMap(); + private final Map hostListeners = new HashMap<>(); + + private final Set zoneWidePoolSupportedHypervisorTypes = Sets.newHashSet(HypervisorType.KVM, HypervisorType.VMware, + HypervisorType.Hyperv, HypervisorType.LXC, HypervisorType.Any, HypervisorType.Simulator); + + private static final String NFS_MOUNT_OPTIONS_INCORRECT = "An incorrect mount option was specified"; public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException { @@ -465,7 +475,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public List ListByDataCenterHypervisor(long datacenterId, HypervisorType type) { List pools = _storagePoolDao.listByDataCenterId(datacenterId); - List retPools = new ArrayList(); + List retPools = new ArrayList<>(); for (StoragePoolVO pool : pools) { if (pool.getStatus() != StoragePoolStatus.Up) { continue; @@ -566,12 +576,37 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canProvideStorageStats(); } + @Override + public boolean poolProvidesCustomStorageStats(StoragePool pool) { + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).poolProvidesCustomStorageStats(); + } + + @Override + public Map getCustomStorageStats(StoragePool pool) { + if (pool == null) { + return null; + } + + if 
(!pool.isManaged()) { + return null; + } + + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + if (storeDriver instanceof PrimaryDataStoreDriver) { + return ((PrimaryDataStoreDriver)storeDriver).getCustomStorageStats(pool); + } + return null; + } + @Override public Answer getVolumeStats(StoragePool pool, Command cmd) { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver) storeDriver; - HashMap statEntry = new HashMap(); + HashMap statEntry = new HashMap<>(); GetVolumeStatsCommand getVolumeStatsCommand = (GetVolumeStatsCommand) cmd; for (String volumeUuid : getVolumeStatsCommand.getVolumeUuids()) { Pair volumeStats = primaryStoreDriver.getVolumeStats(pool, volumeUuid); @@ -793,7 +828,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStoreProvider provider = _dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); if (pool == null) { - Map params = new HashMap(); + Map params = new HashMap<>(); String name = pInfo.getName() != null ? 
pInfo.getName() : createLocalStoragePoolName(host, pInfo); params.put("zoneId", host.getDataCenterId()); params.put("clusterId", host.getClusterId()); @@ -835,6 +870,53 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return String.format("%s-%s-%s", StringUtils.trim(host.getName()), "local", storagePoolInformation.getUuid().split("-")[0]); } + protected void checkNfsMountOptions(String nfsMountOpts) throws InvalidParameterValueException { + String[] options = nfsMountOpts.replaceAll("\\s", "").split(","); + Map optionsMap = new HashMap<>(); + for (String option : options) { + String[] keyValue = option.split("="); + if (keyValue.length > 2) { + throw new InvalidParameterValueException("Invalid value for NFS option " + keyValue[0]); + } + if (optionsMap.containsKey(keyValue[0])) { + throw new InvalidParameterValueException("Duplicate NFS option values found for option " + keyValue[0]); + } + optionsMap.put(keyValue[0], null); + } + } + + protected void checkNFSMountOptionsForCreate(Map details, HypervisorType hypervisorType, String scheme) throws InvalidParameterValueException { + if (!details.containsKey(ApiConstants.NFS_MOUNT_OPTIONS)) { + return; + } + if (!hypervisorType.equals(HypervisorType.KVM) && !hypervisorType.equals(HypervisorType.Simulator)) { + throw new InvalidParameterValueException("NFS options can not be set for the hypervisor type " + hypervisorType); + } + if (!"nfs".equals(scheme)) { + throw new InvalidParameterValueException("NFS options can only be set on pool type " + StoragePoolType.NetworkFilesystem); + } + checkNfsMountOptions(details.get(ApiConstants.NFS_MOUNT_OPTIONS)); + } + + protected void checkNFSMountOptionsForUpdate(Map details, StoragePoolVO pool, Long accountId) throws InvalidParameterValueException { + if (!details.containsKey(ApiConstants.NFS_MOUNT_OPTIONS)) { + return; + } + if (!_accountMgr.isRootAdmin(accountId)) { + throw new PermissionDeniedException("Only root admin can modify nfs 
options"); + } + if (!pool.getHypervisor().equals(HypervisorType.KVM) && !pool.getHypervisor().equals((HypervisorType.Simulator))) { + throw new InvalidParameterValueException("NFS options can only be set for the hypervisor type " + HypervisorType.KVM); + } + if (!pool.getPoolType().equals(StoragePoolType.NetworkFilesystem)) { + throw new InvalidParameterValueException("NFS options can only be set on pool type " + StoragePoolType.NetworkFilesystem); + } + if (!pool.isInMaintenance()) { + throw new InvalidParameterValueException("The storage pool should be in maintenance mode to edit nfs options"); + } + checkNfsMountOptions(details.get(ApiConstants.NFS_MOUNT_OPTIONS)); + } + @Override public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException { String providerName = cmd.getStorageProviderName(); @@ -888,9 +970,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new InvalidParameterValueException("Missing parameter hypervisor. 
Hypervisor type is required to create zone wide primary storage."); } - Set supportedHypervisorTypes = Sets.newHashSet(HypervisorType.KVM, HypervisorType.VMware, - HypervisorType.Hyperv, HypervisorType.LXC, HypervisorType.Any, HypervisorType.Simulator); - if (!supportedHypervisorTypes.contains(hypervisorType)) { + if (!zoneWidePoolSupportedHypervisorTypes.contains(hypervisorType)) { throw new InvalidParameterValueException("Zone wide storage pool is not supported for hypervisor type " + hypervisor); } } else { @@ -899,6 +979,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } Map details = extractApiParamAsMap(cmd.getDetails()); + checkNFSMountOptionsForCreate(details, hypervisorType, uriParams.get("scheme")); + DataCenterVO zone = _dcDao.findById(cmd.getZoneId()); if (zone == null) { throw new InvalidParameterValueException("unable to find zone by id " + zoneId); @@ -909,7 +991,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); } - Map params = new HashMap(); + Map params = new HashMap<>(); params.put("zoneId", zone.getId()); params.put("clusterId", clusterId); params.put("podId", podId); @@ -1031,7 +1113,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } private Map extractApiParamAsMap(Map ds) { - Map details = new HashMap(); + Map details = new HashMap<>(); if (ds != null) { Collection detailsCollection = ds.values(); Iterator it = detailsCollection.iterator(); @@ -1047,8 +1129,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return details; } + @Override @ActionEvent(eventType = EventTypes.EVENT_DISABLE_PRIMARY_STORAGE, eventDescription = "disable storage pool") - private void disablePrimaryStoragePool(StoragePoolVO primaryStorage) { + public StoragePool disablePrimaryStoragePool(Long id) { + StoragePoolVO 
primaryStorage = _storagePoolDao.findById(id); + if (primaryStorage == null) { + throw new IllegalArgumentException(String.format("Unable to find storage pool with ID: %d", id)); + } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up)) { throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be disabled. Storage pool state : " + primaryStorage.getStatus().toString()); } @@ -1057,10 +1144,17 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStoreLifeCycle dataStoreLifeCycle = provider.getDataStoreLifeCycle(); DataStore store = _dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary); ((PrimaryDataStoreLifeCycle)dataStoreLifeCycle).disableStoragePool(store); + + return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(id, DataStoreRole.Primary); } + @Override @ActionEvent(eventType = EventTypes.EVENT_ENABLE_PRIMARY_STORAGE, eventDescription = "enable storage pool") - private void enablePrimaryStoragePool(StoragePoolVO primaryStorage) { + public StoragePool enablePrimaryStoragePool(Long id) { + StoragePoolVO primaryStorage = _storagePoolDao.findById(id); + if (primaryStorage == null) { + throw new IllegalArgumentException(String.format("Unable to find storage pool with ID: %d", id)); + } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) { throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be enabled. 
Storage pool state : " + primaryStorage.getStatus().toString()); } @@ -1069,9 +1163,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStoreLifeCycle dataStoreLifeCycle = provider.getDataStoreLifeCycle(); DataStore store = _dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary); ((PrimaryDataStoreLifeCycle)dataStoreLifeCycle).enableStoragePool(store); + + return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(id, DataStoreRole.Primary); } @Override + @ActionEvent(eventType = EventTypes.EVENT_UPDATE_PRIMARY_STORAGE, eventDescription = "update storage pool") public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException { // Input validation Long id = cmd.getId(); @@ -1081,6 +1178,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new IllegalArgumentException("Unable to find storage pool with ID: " + id); } + Map inputDetails = extractApiParamAsMap(cmd.getDetails()); + checkNFSMountOptionsForUpdate(inputDetails, pool, cmd.getEntityOwnerId()); + String name = cmd.getName(); if(StringUtils.isNotBlank(name)) { logger.debug("Updating Storage Pool name to: " + name); @@ -1124,12 +1224,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } // retrieve current details and merge/overlay input to capture changes - Map inputDetails = extractApiParamAsMap(cmd.getDetails()); Map details = null; - if (inputDetails == null) { - details = _storagePoolDetailsDao.listDetailsKeyPairs(id); - } else { - details = _storagePoolDetailsDao.listDetailsKeyPairs(id); + details = _storagePoolDetailsDao.listDetailsKeyPairs(id); + if (inputDetails != null) { details.putAll(inputDetails); changes = true; } @@ -1156,16 +1253,116 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - Boolean enabled = cmd.getEnabled(); - if (enabled != null) { - if (enabled) { - enablePrimaryStoragePool(pool); 
- } else { - disablePrimaryStoragePool(pool); - } + return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + } + + private void changeStoragePoolScopeToZone(StoragePoolVO primaryStorage) { + /* + * For cluster wide primary storage the hypervisor type might not be set. + * So, get it from the clusterVO. + */ + Long clusterId = primaryStorage.getClusterId(); + ClusterVO clusterVO = _clusterDao.findById(clusterId); + HypervisorType hypervisorType = clusterVO.getHypervisorType(); + if (!zoneWidePoolSupportedHypervisorTypes.contains(hypervisorType)) { + throw new InvalidParameterValueException("Primary storage scope change to Zone is not supported for hypervisor type " + hypervisorType); } - return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); + PrimaryDataStoreLifeCycle lifeCycle = (PrimaryDataStoreLifeCycle) storeProvider.getDataStoreLifeCycle(); + + DataStore primaryStore = _dataStoreMgr.getPrimaryDataStore(primaryStorage.getId()); + ClusterScope clusterScope = new ClusterScope(primaryStorage.getClusterId(), null, primaryStorage.getDataCenterId()); + + lifeCycle.changeStoragePoolScopeToZone(primaryStore, clusterScope, hypervisorType); + } + + private void changeStoragePoolScopeToCluster(StoragePoolVO primaryStorage, Long clusterId) { + if (clusterId == null) { + throw new InvalidParameterValueException("Cluster ID not provided"); + } + ClusterVO clusterVO = _clusterDao.findById(clusterId); + if (clusterVO == null) { + throw new InvalidParameterValueException("Unable to find cluster by id " + clusterId); + } + if (clusterVO.getAllocationState().equals(Grouping.AllocationState.Disabled)) { + throw new PermissionDeniedException("Cannot perform this operation, Cluster is currently disabled: " + clusterId); + } + + List states = Arrays.asList(State.Starting, State.Running, 
State.Stopping, State.Migrating, State.Restoring); + + Long id = primaryStorage.getId(); + Pair, Integer> vmsNotInClusterUsingPool = _vmInstanceDao.listByVmsNotInClusterUsingPool(clusterId, id); + if (vmsNotInClusterUsingPool.second() != 0) { + throw new CloudRuntimeException(String.format("Cannot change scope of the storage pool [%s] to cluster [%s] " + + "as there are %s VMs with volumes in this pool that are running on other clusters. " + + "All such User VMs must be stopped and System VMs must be destroyed before proceeding. " + + "Please use the API listAffectedVmsForStorageScopeChange to get the list.", + primaryStorage.getName(), clusterVO.getName(), vmsNotInClusterUsingPool.second())); + } + + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); + PrimaryDataStoreLifeCycle lifeCycle = (PrimaryDataStoreLifeCycle) storeProvider.getDataStoreLifeCycle(); + + DataStore primaryStore = _dataStoreMgr.getPrimaryDataStore(id); + ClusterScope clusterScope = new ClusterScope(clusterId, clusterVO.getPodId(), primaryStorage.getDataCenterId()); + + lifeCycle.changeStoragePoolScopeToCluster(primaryStore, clusterScope, primaryStorage.getHypervisor()); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_CHANGE_STORAGE_POOL_SCOPE, eventDescription = "changing storage pool scope") + public void changeStoragePoolScope(ChangeStoragePoolScopeCmd cmd) throws IllegalArgumentException, InvalidParameterValueException, PermissionDeniedException { + Long id = cmd.getId(); + + Long accountId = cmd.getEntityOwnerId(); + if (!_accountMgr.isRootAdmin(accountId)) { + throw new PermissionDeniedException("Only root admin can perform this operation"); + } + + ScopeType newScope = EnumUtils.getEnumIgnoreCase(ScopeType.class, cmd.getScope()); + if (newScope != ScopeType.ZONE && newScope != ScopeType.CLUSTER) { + throw new InvalidParameterValueException("Invalid scope " + cmd.getScope() + "for Primary storage"); + } + + 
StoragePoolVO primaryStorage = _storagePoolDao.findById(id); + if (primaryStorage == null) { + throw new IllegalArgumentException("Unable to find storage pool with ID: " + id); + } + + String eventDetails = String.format(" Storage pool Id: %s to %s",primaryStorage.getUuid(), newScope); + CallContext.current().setEventDetails(eventDetails); + + ScopeType currentScope = primaryStorage.getScope(); + if (currentScope.equals(newScope)) { + throw new InvalidParameterValueException("New scope must be different than the current scope"); + } + + if (currentScope != ScopeType.ZONE && currentScope != ScopeType.CLUSTER) { + throw new InvalidParameterValueException("This operation is supported only for Primary storages having scope " + + ScopeType.CLUSTER + " or " + ScopeType.ZONE); + } + + if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) { + throw new InvalidParameterValueException("Scope of the Primary storage with id " + + primaryStorage.getUuid() + + " cannot be changed, as it is not in the Disabled state"); + } + + Long zoneId = primaryStorage.getDataCenterId(); + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException("Unable to find zone by id " + zoneId); + } + if (zone.getAllocationState().equals(Grouping.AllocationState.Disabled)) { + throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + } + + if (newScope.equals(ScopeType.ZONE)) { + changeStoragePoolScopeToZone(primaryStorage); + } else { + changeStoragePoolScopeToCluster(primaryStorage, cmd.getClusterId()); + } } @Override @@ -1222,12 +1419,39 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } }); } else { - throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated " + "non-destroyed vols for this pool"); + logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", 
sPool::getName, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool.getName())); } } return deleteDataStoreInternal(sPool, forced); } + @Override + public Pair, Boolean> getStoragePoolNFSMountOpts(StoragePool pool, Map details) { + boolean details_added = false; + if (!pool.getPoolType().equals(Storage.StoragePoolType.NetworkFilesystem)) { + return new Pair<>(details, details_added); + } + + StoragePoolDetailVO nfsMountOpts = _storagePoolDetailsDao.findDetail(pool.getId(), ApiConstants.NFS_MOUNT_OPTIONS); + if (nfsMountOpts != null) { + if (details == null) { + details = new HashMap<>(); + } + details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts.getValue()); + details_added = true; + } + return new Pair<>(details, details_added); + } + + public String getStoragePoolMountFailureReason(String reason) { + if (reason.toLowerCase().contains(NFS_MOUNT_OPTIONS_INCORRECT.toLowerCase())) { + return NFS_MOUNT_OPTIONS_INCORRECT; + } else { + return null; + } + } + private boolean checkIfDataStoreClusterCanbeDeleted(StoragePoolVO sPool, boolean forced) { List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(sPool.getId()); boolean canDelete = true; @@ -1257,7 +1481,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (vlms.first() > 0) { Pair nonDstrdVlms = volumeDao.getNonDestroyedCountAndTotalByPool(sPool.getId()); if (nonDstrdVlms.first() > 0) { - throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated " + "non-destroyed vols for this pool"); + logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::getName, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes 
associated to this pool.", sPool.getName())); } // force expunge non-destroyed volumes List vols = volumeDao.listVolumesToBeDestroyed(); @@ -1265,9 +1490,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C AsyncCallFuture future = volService.expungeVolumeAsync(volFactory.getVolume(vol.getId())); try { future.get(); - } catch (InterruptedException e) { - logger.debug("expunge volume failed:" + vol.getId(), e); - } catch (ExecutionException e) { + } catch (InterruptedException | ExecutionException e) { logger.debug("expunge volume failed:" + vol.getId(), e); } } @@ -1276,7 +1499,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Check if the pool has associated volumes in the volumes table // If it does , then you cannot delete the pool if (vlms.first() > 0) { - throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated volumes for this pool"); + logger.debug("Cannot delete storage pool {} as the following non-destroyed volumes are on it: {}.", sPool::getName, () -> getStoragePoolNonDestroyedVolumesLog(sPool.getId())); + throw new CloudRuntimeException(String.format("Cannot delete pool %s as there are non-destroyed volumes associated to this pool.", sPool.getName())); } } @@ -1299,6 +1523,23 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return lifeCycle.deleteDataStore(store); } + protected String getStoragePoolNonDestroyedVolumesLog(long storagePoolId) { + StringBuilder sb = new StringBuilder(); + List nonDestroyedVols = volumeDao.findByPoolId(storagePoolId, null).stream().filter(vol -> vol.getState() != Volume.State.Destroy).collect(Collectors.toList()); + VMInstanceVO volInstance; + List logMessageInfo = new ArrayList<>(); + + sb.append("["); + for (VolumeVO vol : nonDestroyedVols) { + volInstance = _vmInstanceDao.findById(vol.getInstanceId()); + logMessageInfo.add(String.format("Volume [%s] (attached to VM [%s])", 
vol.getUuid(), volInstance.getUuid())); + } + sb.append(String.join(", ", logMessageInfo)); + sb.append("]"); + + return sb.toString(); + } + @Override public boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); @@ -1442,13 +1683,13 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } for (Long hostId : hostIds) { try { - List answers = new ArrayList(); + List answers = new ArrayList<>(); Command[] cmdArray = cmds.toCommands(); for (Command cmd : cmdArray) { long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostId, cmd); answers.add(_agentMgr.send(targetHostId, cmd)); } - return new Pair(hostId, answers.toArray(new Answer[answers.size()])); + return new Pair<>(hostId, answers.toArray(new Answer[answers.size()])); } catch (AgentUnavailableException e) { logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); } catch (OperationTimedoutException e) { @@ -1463,7 +1704,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Command cmd) throws StorageUnavailableException { Commands cmds = new Commands(cmd); Pair result = sendToPool(pool, hostIdsToTryFirst, hostIdsToAvoid, cmds); - return new Pair(result.first(), result.second()[0]); + return new Pair<>(result.first(), result.second()[0]); } private void cleanupInactiveTemplates() { @@ -1785,7 +2026,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @DB List findAllVolumeIdInSnapshotTable(Long storeId) { String sql = "SELECT volume_id from snapshots, snapshot_store_ref WHERE snapshots.id = snapshot_store_ref.snapshot_id and store_id=? 
GROUP BY volume_id"; - List list = new ArrayList(); + List list = new ArrayList<>(); try { TransactionLegacy txn = TransactionLegacy.currentTxn(); ResultSet rs = null; @@ -1813,7 +2054,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C pstmt = txn.prepareAutoCloseStatement(sql); pstmt.setLong(1, volumeId); rs = pstmt.executeQuery(); - List list = new ArrayList(); + List list = new ArrayList<>(); while (rs.next()) { list.add(rs.getString(1)); } @@ -1921,6 +2162,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override @DB + @ActionEvent(eventType = EventTypes.EVENT_MAINTENANCE_PREPARE_PRIMARY_STORAGE, + eventDescription = "preparing storage pool for maintenance", async = true) public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStorageId) throws ResourceUnavailableException, InsufficientCapacityException { StoragePoolVO primaryStorage = null; primaryStorage = _storagePoolDao.findById(primaryStorageId); @@ -1989,6 +2232,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override @DB + @ActionEvent(eventType = EventTypes.EVENT_MAINTENANCE_CANCEL_PRIMARY_STORAGE, + eventDescription = "canceling maintenance for primary storage pool", async = true) public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(CancelPrimaryStorageMaintenanceCmd cmd) throws ResourceUnavailableException { Long primaryStorageId = cmd.getId(); StoragePoolVO primaryStorage = null; @@ -2058,7 +2303,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } if (!answer.getResult()) { - throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to ", pool.getUuid(), hostId, answer.getDetails())); + throw new CloudRuntimeException(String.format("Unable to process ModifyStoragePoolCommand for pool %s on the host %s due to %s", pool.getUuid(), hostId, answer.getDetails())); } 
assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + @@ -2293,7 +2538,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Prepare for the syncvolumepath command DataTO volTO = volFactory.getVolume(volume.getId()).getTO(); DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType()); - Map details = new HashMap(); + Map details = new HashMap<>(); details.put(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString()); disk.setDetails(details); @@ -2412,7 +2657,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); } - List hosts = new ArrayList(); + List hosts = new ArrayList<>(); if (hostId != null) { hosts.add(hostId); } else { @@ -2567,11 +2812,18 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); - if (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool)) { - return true; + return (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool)); + } + + @Override + public boolean canHostPrepareStoragePoolAccess(Host host, StoragePool pool) { + if (host == null || pool == null || !pool.isManaged()) { + return false; } - return false; + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + return storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostPrepareStoragePoolAccess(host, 
pool); } @Override @@ -2890,21 +3142,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (CollectionUtils.isEmpty(volumes)) { return false; } - List> answers = new ArrayList>(); + List> answers = new ArrayList<>(); for (Pair volumeDiskProfilePair : volumes) { - String storagePolicyId = null; Volume volume = volumeDiskProfilePair.first(); DiskProfile diskProfile = volumeDiskProfilePair.second(); - if (volume.getVolumeType() == Type.ROOT) { - Long vmId = volume.getInstanceId(); - if (vmId != null) { - VMInstanceVO vm = _vmInstanceDao.findByIdIncludingRemoved(vmId); - storagePolicyId = _serviceOfferingDetailsDao.getDetail(vm.getServiceOfferingId(), ApiConstants.STORAGE_POLICY); - } - } else { - storagePolicyId = _diskOfferingDetailsDao.getDetail(diskProfile.getDiskOfferingId(), ApiConstants.STORAGE_POLICY); - } + String storagePolicyId = _diskOfferingDetailsDao.getDetail(diskProfile.getDiskOfferingId(), ApiConstants.STORAGE_POLICY); Answer answer = getCheckDatastorePolicyComplianceAnswer(storagePolicyId, pool); if (answer != null) { answers.add(new Pair<>(volume, answer)); @@ -3237,7 +3480,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Check if it's the only/first store in the zone if (stores.size() == 0) { List hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId); - Set hypSet = new HashSet(hypervisorTypes); + Set hypSet = new HashSet<>(hypervisorTypes); TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); String filePath = null; @@ -3247,6 +3490,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw new CloudRuntimeException("Failed to create temporary file path to mount the store"); } Pair storeUrlAndId = new Pair<>(url, store.getId()); + String nfsVersion = imageStoreDetailsUtil.getNfsVersion(store.getId()); for 
(HypervisorType hypervisorType : hypSet) { try { if (HypervisorType.Simulator == hypervisorType) { @@ -3263,7 +3507,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); if (templateVO != null) { try { - if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) { + if (SystemVmTemplateRegistration.validateIfSeeded( + url, templateVO.getInstallPath(), nfsVersion)) { continue; } } catch (Exception e) { @@ -3271,7 +3516,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } } - SystemVmTemplateRegistration.mountStore(storeUrlAndId.first(), filePath); + SystemVmTemplateRegistration.mountStore(storeUrlAndId.first(), filePath, nfsVersion); if (templateVO != null && vmTemplateVO != null) { systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, vmTemplateVO, templateVO, filePath); } else { @@ -3297,7 +3542,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws DiscoveryException, InvalidParameterValueException { // check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages List imgStores = _imageStoreDao.listImageStores(); - List nfsStores = new ArrayList(); + List nfsStores = new ArrayList<>(); if (imgStores != null && imgStores.size() > 0) { for (ImageStoreVO store : imgStores) { if (!store.getProviderName().equals(DataStoreProvider.NFS_IMAGE)) { @@ -3327,20 +3572,38 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return discoverImageStore(name, url, providerName, null, details); } + @Override + public ImageStore updateImageStore(UpdateImageStoreCmd cmd) { + return updateImageStoreStatus(cmd.getId(), cmd.getName(), cmd.getReadonly(), cmd.getCapacityBytes()); + 
} + @Override @ActionEvent(eventType = EventTypes.EVENT_UPDATE_IMAGE_STORE_ACCESS_STATE, eventDescription = "image store access updated") - public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + public ImageStore updateImageStoreStatus(Long id, String name, Boolean readonly, Long capacityBytes) { // Input validation ImageStoreVO imageStoreVO = _imageStoreDao.findById(id); if (imageStoreVO == null) { throw new IllegalArgumentException("Unable to find image store with ID: " + id); } - imageStoreVO.setReadonly(readonly); + if (com.cloud.utils.StringUtils.isNotBlank(name)) { + imageStoreVO.setName(name); + } + if (capacityBytes != null) { + imageStoreVO.setTotalSize(capacityBytes); + } + if (readonly != null) { + imageStoreVO.setReadonly(readonly); + } _imageStoreDao.update(id, imageStoreVO); return imageStoreVO; } + @Override + public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + return updateImageStoreStatus(id, null, readonly, null); + } + /** * @param poolId - Storage pool id for pool to update. * @param failOnChecks - If true, throw an error if pool type and state checks fail. 
@@ -3385,7 +3648,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } // find the host - List poolIds = new ArrayList(); + List poolIds = new ArrayList<>(); poolIds.add(pool.getId()); List hosts = _storagePoolHostDao.findHostsConnectedToPools(poolIds); if (hosts.size() > 0) { @@ -3422,7 +3685,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C VMTemplateZoneVO tmpltZone; List allTemplates = _vmTemplateDao.listAll(); - List dcIds = new ArrayList(); + List dcIds = new ArrayList<>(); if (zoneId != null) { dcIds.add(zoneId); } else { @@ -3539,7 +3802,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C throw ex; } - Map params = new HashMap(); + Map params = new HashMap<>(); params.put("zoneId", dcId); params.put("url", cmd.getUrl()); params.put("name", cmd.getUrl()); @@ -3627,8 +3890,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Cleanup expired volume URLs List volumesOnImageStoreList = _volumeStoreDao.listVolumeDownloadUrls(); - HashSet expiredVolumeIds = new HashSet(); - HashSet activeVolumeIds = new HashSet(); + HashSet expiredVolumeIds = new HashSet<>(); + HashSet activeVolumeIds = new HashSet<>(); for (VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList) { long volumeId = volumeOnImageStore.getVolumeId(); @@ -3776,6 +4039,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C STORAGE_POOL_DISK_WAIT, STORAGE_POOL_CLIENT_TIMEOUT, STORAGE_POOL_CLIENT_MAX_CONNECTIONS, + STORAGE_POOL_CONNECTED_CLIENTS_LIMIT, STORAGE_POOL_IO_POLICY, PRIMARY_STORAGE_DOWNLOAD_WAIT, SecStorageMaxMigrateSessions, @@ -3786,7 +4050,6 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C MountDisabledStoragePool, VmwareCreateCloneFull, VmwareAllowParallelExecution, - ConvertVmwareInstanceToKvmTimeout, DataStoreDownloadFollowRedirects }; } @@ -3851,10 +4114,9 @@ public class 
StorageManagerImpl extends ManagerBase implements StorageManager, C } try { - // Check URL UriUtils.validateUrl(url); - } catch (final Exception e) { - throw new InvalidParameterValueException(url + " is not a valid URL"); + } catch (InvalidParameterValueException e) { + throw new InvalidParameterValueException(url + " is not a valid URL:" + e.getMessage()); } // Check Unique object store url diff --git a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java index 6a8e3f0ff51..f1c7c38b8dc 100644 --- a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java +++ b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java @@ -20,6 +20,7 @@ package com.cloud.storage; import java.util.ArrayList; import java.util.List; +import java.util.Map; import javax.inject.Inject; @@ -28,6 +29,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -49,6 +51,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.User; import com.cloud.user.dao.UserDao; +import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.DomainRouterVO; @@ -89,6 +92,8 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { @Inject PrimaryDataStoreDao primaryDataStoreDao; @Inject + StoragePoolDetailsDao storagePoolDetailsDao; + @Inject DataStoreManager dataStoreMgr; @Inject protected 
ResourceManager _resourceMgr; @@ -319,14 +324,25 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { if (hosts == null || hosts.size() == 0) { return true; } + + Pair, Boolean> nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null); // add heartbeat for (HostVO host : hosts) { - ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool); + ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first()); final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd); if (answer == null || !answer.getResult()) { if (logger.isDebugEnabled()) { logger.debug("ModifyStoragePool add failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); } + if (answer != null && nfsMountOpts.second()) { + logger.error(String.format("Unable to attach storage pool to the host %s due to %s", host, answer.getDetails())); + StringBuilder exceptionSB = new StringBuilder("Unable to attach storage pool to the host ").append(host.getName()); + String reason = storageManager.getStoragePoolMountFailureReason(answer.getDetails()); + if (reason!= null) { + exceptionSB.append(". 
").append(reason).append("."); + } + throw new CloudRuntimeException(exceptionSB.toString()); + } } else { if (logger.isDebugEnabled()) { logger.debug("ModifyStoragePool add succeeded"); diff --git a/server/src/main/java/com/cloud/storage/TemplateProfile.java b/server/src/main/java/com/cloud/storage/TemplateProfile.java index 9f92d49c3a3..c2a3c506221 100644 --- a/server/src/main/java/com/cloud/storage/TemplateProfile.java +++ b/server/src/main/java/com/cloud/storage/TemplateProfile.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; @@ -28,6 +29,7 @@ public class TemplateProfile { Long userId; String name; String displayText; + CPU.CPUArch arch; Integer bits; Boolean passwordEnabled; Boolean sshKeyEnbaled; @@ -56,13 +58,14 @@ public class TemplateProfile { Boolean forCks; Long size; - public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, + public TemplateProfile(Long templateId, Long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneIdList, HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, Map details, Boolean sshKeyEnabled) { this.templateId = templateId; this.userId = userId; this.name = name; this.displayText = displayText; + this.arch = arch; this.bits = bits; this.passwordEnabled = passwordEnabled; this.requiresHvm = requiresHvm; @@ -93,15 +96,16 @@ public class TemplateProfile { else this.zoneIdList = null; } - public TemplateProfile(Long templateId, Long userId, String name, String displayText, 
Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneId, + public TemplateProfile(Long templateId, Long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, + Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, List zoneId, - HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, - Boolean sshKeyEnabled, Long imageStoreId, Boolean isDynamicallyScalable, TemplateType templateType, Boolean directDownload, Boolean deployAsIs) { + HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, + Boolean sshKeyEnabled, Long imageStoreId, Boolean isDynamicallyScalable, TemplateType templateType, Boolean directDownload, Boolean deployAsIs) { this(templateId, userId, name, displayText, + arch, bits, passwordEnabled, requiresHvm, @@ -346,4 +350,8 @@ public class TemplateProfile { public void setForCks(Boolean forCks) { this.forCks = forCks; } + + public CPU.CPUArch getArch() { + return arch; + } } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index ac6cdea7e0d..cb859f2dde9 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -53,6 +53,7 @@ import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; import org.apache.cloudstack.api.response.GetUploadParamsResponse; import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.backup.dao.BackupDao; import 
org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.direct.download.DirectDownloadHelper; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; @@ -125,8 +126,6 @@ import com.cloud.agent.api.ModifyTargetsCommand; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.api.ApiDBUtils; -import com.cloud.api.query.dao.ServiceOfferingJoinDao; -import com.cloud.api.query.vo.ServiceOfferingJoinVO; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.Resource.ResourceType; @@ -157,6 +156,7 @@ import com.cloud.offering.DiskOffering; import com.cloud.org.Grouping; import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; +import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.serializer.GsonHelper; import com.cloud.server.ManagementService; @@ -256,6 +256,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Inject private ConfigurationManager _configMgr; @Inject + private ResourceManager _resourceMgr; + @Inject private VolumeDao _volsDao; @Inject private VolumeDetailsDao _volsDetailsDao; @@ -270,8 +272,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Inject private ServiceOfferingDetailsDao _serviceOfferingDetailsDao; @Inject - private ServiceOfferingJoinDao serviceOfferingJoinDao; - @Inject private UserVmDao _userVmDao; @Inject private UserVmDetailsDao userVmDetailsDao; @@ -345,6 +345,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic protected ProjectManager projectManager; @Inject protected StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + private BackupDao backupDao; protected Gson _gson; @@ -566,7 +568,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic 
_resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.secondary_storage); } - sanitizeFormat(format); + checkFormatWithSupportedHypervisorsInZone(format, zoneId); // Check that the disk offering specified is valid if (diskOfferingId != null) { @@ -582,6 +584,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return false; } + private void checkFormatWithSupportedHypervisorsInZone(String format, Long zoneId) { + ImageFormat imageformat = ImageFormat.valueOf(format); + final List supportedHypervisorTypesInZone = _resourceMgr.getSupportedHypervisorTypes(zoneId, false, null); + final HypervisorType hypervisorTypeFromFormat = ApiDBUtils.getHypervisorTypeFromFormat(zoneId, imageformat); + if (!(supportedHypervisorTypesInZone.contains(hypervisorTypeFromFormat))) { + throw new InvalidParameterValueException(String.format("The %s hypervisor supported for %s file format, is not found on the zone", hypervisorTypeFromFormat.toString(), format)); + } + } + public String getRandomVolumeName() { return UUID.randomUUID().toString(); } @@ -1023,7 +1034,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // if VM Id is provided, attach the volume to the VM if (cmd.getVirtualMachineId() != null) { try { - attachVolumeToVM(cmd.getVirtualMachineId(), volume.getId(), volume.getDeviceId()); + attachVolumeToVM(cmd.getVirtualMachineId(), volume.getId(), volume.getDeviceId(), false); } catch (Exception ex) { StringBuilder message = new StringBuilder("Volume: "); message.append(volume.getUuid()); @@ -1043,7 +1054,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic created = false; VolumeInfo vol = volFactory.getVolume(cmd.getEntityId()); vol.stateTransit(Volume.Event.DestroyRequested); - throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e); + throw new CloudRuntimeException("Failed to create volume: " + volume.getUuid(), e); } 
finally { if (!created) { logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); @@ -1347,8 +1358,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic boolean isNotIso = format != null && format != ImageFormat.ISO; boolean isRoot = Volume.Type.ROOT.equals(volume.getVolumeType()); - ServiceOfferingJoinVO serviceOfferingView = serviceOfferingJoinDao.findById(diskOffering.getId()); - boolean isOfferingEnforcingRootDiskSize = serviceOfferingView != null && serviceOfferingView.getRootDiskSize() > 0; + boolean isOfferingEnforcingRootDiskSize = diskOffering.isComputeOnly() && diskOffering.getDiskSize() > 0; return isOfferingEnforcingRootDiskSize && isRoot && isNotIso; } @@ -1379,7 +1389,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long newSize, Long newMinIops, Long newMaxIops, Integer newHypervisorSnapshotReserve, Long newDiskOfferingId, boolean shrinkOk) { - final VolumeVO volume = _volsDao.findById(volumeId); + VolumeVO volume = _volsDao.findById(volumeId); UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId()); Long currentDiskOfferingId = volume.getDiskOfferingId(); @@ -1487,12 +1497,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } // Update size if volume has same size as before, else it is already updated - final VolumeVO volumeNow = _volsDao.findById(volumeId); - if (currentSize == volumeNow.getSize() && currentSize != newSize) { + volume = _volsDao.findById(volumeId); + if (currentSize == volume.getSize() && currentSize != newSize) { volume.setSize(newSize); - } else if (volumeNow.getSize() != newSize) { + } else if (volume.getSize() != newSize) { // consider the updated size as the new size - newSize = 
volumeNow.getSize(); + newSize = volume.getSize(); } _volsDao.update(volume.getId(), volume); @@ -1684,6 +1694,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } public void validateDestroyVolume(Volume volume, Account caller, boolean expunge, boolean forceExpunge) { + if (volume.isDeleteProtection()) { + throw new InvalidParameterValueException(String.format( + "Volume [id = %s, name = %s] has delete protection enabled and cannot be deleted.", + volume.getUuid(), volume.getName())); + } + if (expunge) { // When trying to expunge, permission is denied when the caller is not an admin and the AllowUserExpungeRecoverVolume is false for the caller. final Long userId = caller.getAccountId(); @@ -1962,7 +1978,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic boolean shrinkOk = cmd.isShrinkOk(); boolean autoMigrateVolume = cmd.getAutoMigrate(); - VolumeVO volume = _volsDao.findById(cmd.getId()); + return changeDiskOfferingForVolumeInternal(cmd.getId(), newDiskOfferingId, newSize, newMinIops, newMaxIops, autoMigrateVolume, shrinkOk); + } + + @Override + public Volume changeDiskOfferingForVolumeInternal(Long volumeId, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean autoMigrateVolume, boolean shrinkOk) throws ResourceAllocationException { + + VolumeVO volume = _volsDao.findById(volumeId); if (volume == null) { throw new InvalidParameterValueException("No such volume"); } @@ -1970,11 +1992,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic /* Does the caller have authority to act on this volume? 
*/ _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, volume); - return changeDiskOfferingForVolumeInternal(volume, newDiskOfferingId, newSize, newMinIops, newMaxIops, autoMigrateVolume, shrinkOk); - } - - private Volume changeDiskOfferingForVolumeInternal(VolumeVO volume, Long newDiskOfferingId, Long newSize, Long newMinIops, Long newMaxIops, boolean autoMigrateVolume, boolean shrinkOk) throws ResourceAllocationException { - DiskOfferingVO existingDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); + long existingDiskOfferingId = volume.getDiskOfferingId(); + DiskOfferingVO existingDiskOffering = _diskOfferingDao.findByIdIncludingRemoved(existingDiskOfferingId); DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newDiskOfferingId); Integer newHypervisorSnapshotReserve = null; @@ -1986,6 +2005,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic Long[] updateNewMinIops = {newMinIops}; Long[] updateNewMaxIops = {newMaxIops}; Integer[] updateNewHypervisorSnapshotReserve = {newHypervisorSnapshotReserve}; + volService.validateChangeDiskOfferingEncryptionType(existingDiskOfferingId, newDiskOfferingId); validateVolumeResizeWithNewDiskOfferingAndLoad(volume, existingDiskOffering, newDiskOffering, updateNewSize, updateNewMinIops, updateNewMaxIops, updateNewHypervisorSnapshotReserve); newSize = updateNewSize[0]; newMinIops = updateNewMinIops[0]; @@ -2048,6 +2068,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (!volumeMigrateRequired && !volumeResizeRequired) { _volsDao.updateDiskOffering(volume.getId(), newDiskOffering.getId()); volume = _volsDao.findById(volume.getId()); + updateStorageWithTheNewDiskOffering(volume, newDiskOffering); + return volume; } @@ -2084,6 +2106,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return volume; } + private void updateStorageWithTheNewDiskOffering(VolumeVO volume, 
DiskOfferingVO newDiskOffering) { + DataStore dataStore = dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary); + DataStoreDriver dataStoreDriver = dataStore != null ? dataStore.getDriver() : null; + + if (dataStoreDriver instanceof PrimaryDataStoreDriver) { + PrimaryDataStoreDriver storageDriver = (PrimaryDataStoreDriver)dataStoreDriver; + if (storageDriver.informStorageForDiskOfferingChange()) { + storageDriver.updateStorageWithTheNewDiskOffering(volume, newDiskOffering); + } + } + } + /** * This method is to compare long values, in miniops and maxiops a or b can be null or 0. * Use this method to treat 0 and null as same @@ -2317,7 +2351,17 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic * the actual disk size. */ if (currentSize > newSize) { - if (volume != null && ImageFormat.QCOW2.equals(volume.getFormat()) && !Volume.State.Allocated.equals(volume.getState())) { + if (shrinkOk) { + VMInstanceVO vm = _vmInstanceDao.findById(volume.getInstanceId()); + if (vm != null && vm.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); + if (userVm != null && UserVmManager.SHAREDFSVM.equals(userVm.getUserVmType())) { + throw new InvalidParameterValueException("Shrink volume cannot be done on a Shared FileSystem Instance"); + } + } + } + + if (volume != null && ImageFormat.QCOW2.equals(volume.getFormat()) && !Volume.State.Allocated.equals(volume.getState()) && !StoragePoolType.StorPool.equals(volume.getPoolType())) { String message = "Unable to shrink volumes of type QCOW2"; logger.warn(message); throw new InvalidParameterValueException(message); @@ -2337,7 +2381,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Override @ActionEvent(eventType = EventTypes.EVENT_VOLUME_ATTACH, eventDescription = "attaching volume", async = true) public Volume attachVolumeToVM(AttachVolumeCmd command) { - return 
attachVolumeToVM(command.getVirtualMachineId(), command.getId(), command.getDeviceId()); + return attachVolumeToVM(command.getVirtualMachineId(), command.getId(), command.getDeviceId(), false); } private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long deviceId) { @@ -2445,13 +2489,17 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return newVol; } - public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId) { + public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean allowAttachForSharedFS) { Account caller = CallContext.current().getCallingAccount(); VolumeInfo volumeToAttach = getAndCheckVolumeInfo(volumeId); UserVmVO vm = getAndCheckUserVmVO(vmId, volumeToAttach); + if (!allowAttachForSharedFS && UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { + throw new InvalidParameterValueException("Can't attach a volume to a Shared FileSystem Instance"); + } + checkDeviceId(deviceId, volumeToAttach, vm); checkNumberOfAttachedVolumes(deviceId, vm); @@ -2572,7 +2620,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } // if target VM has backups - if (vm.getBackupOfferingId() != null || vm.getBackupVolumeList().size() > 0) { + List backups = backupDao.listByVmId(vm.getDataCenterId(), vm.getId()); + if (vm.getBackupOfferingId() != null && !backups.isEmpty()) { throw new InvalidParameterValueException(String.format("Unable to attach volume to VM %s/%s, please specify a VM that does not have any backups", vm.getName(), vm.getUuid())); } } @@ -2709,13 +2758,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Override @ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPDATE, eventDescription = "updating volume", async = true) - public Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean displayVolume, + public Volume updateVolume(long volumeId, String path, String state, Long 
storageId, + Boolean displayVolume, Boolean deleteProtection, String customId, long entityOwnerId, String chainInfo, String name) { Account caller = CallContext.current().getCallingAccount(); if (!_accountMgr.isRootAdmin(caller.getId())) { if (path != null || state != null || storageId != null || displayVolume != null || customId != null || chainInfo != null) { - throw new InvalidParameterValueException("The domain admin and normal user are not allowed to update volume except volume name"); + throw new InvalidParameterValueException("The domain admin and normal user are " + + "not allowed to update volume except volume name & delete protection"); } } @@ -2767,6 +2818,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic volume.setName(name); } + if (deleteProtection != null) { + volume.setDeleteProtection(deleteProtection); + } + updateDisplay(volume, displayVolume); _volsDao.update(volumeId, volume); @@ -2866,6 +2921,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // Check that the VM is in the correct state UserVmVO vm = _userVmDao.findById(vmId); + + if (UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { + throw new InvalidParameterValueException("Can't detach a volume from a Shared FileSystem Instance"); + } + if (vm.getState() != State.Running && vm.getState() != State.Stopped && vm.getState() != State.Destroyed) { throw new InvalidParameterValueException("Please specify a VM that is either running or stopped."); } @@ -3305,6 +3365,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } DiskOfferingVO newDiskOffering = retrieveAndValidateNewDiskOffering(cmd); + // if no new disk offering was provided, and match is required, default to the offering of the + // original volume. otherwise it falls through with no check and the target volume may + // not work correctly in some scenarios with the target provider. 
Adminstrator + // can disable this flag dynamically for certain bulk migration scenarios if required. + if (newDiskOffering == null && Boolean.TRUE.equals(MatchStoragePoolTagsWithDiskOffering.value())) { + newDiskOffering = diskOffering; + } validateConditionsToReplaceDiskOfferingOfVolume(vol, newDiskOffering, destPool); if (vm != null) { @@ -3390,14 +3457,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic Account caller = CallContext.current().getCallingAccount(); DataCenter zone = null; Volume volume = _volsDao.findById(cmd.getId()); - if (volume != null) { - zone = _dcDao.findById(volume.getDataCenterId()); + if (volume == null) { + throw new InvalidParameterValueException(String.format("Provided volume id is not valid: %s", cmd.getId())); } + zone = _dcDao.findById(volume.getDataCenterId()); + _accountMgr.checkAccess(caller, newDiskOffering, zone); - DiskOfferingVO currentDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); - if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingHasTagsAsOldDiskOffering(currentDiskOffering, newDiskOffering)) { - throw new InvalidParameterValueException(String.format("Existing disk offering storage tags of the volume %s does not contain in the new disk offering %s ", volume.getUuid(), newDiskOffering.getUuid())); - } return newDiskOffering; } @@ -3482,6 +3547,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return doesTargetStorageSupportDiskOffering(destPool, targetStoreTags); } + public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) { + String[] oldDOStorageTags = oldDO.getTagsArray(); + String[] newDOStorageTags = newDO.getTagsArray(); + if (oldDOStorageTags.length == 0) { + return true; + } + if (newDOStorageTags.length == 0) { + return false; + } + return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), 
Arrays.asList(newDOStorageTags)); + } + @Override public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String diskOfferingTags) { Pair, Boolean> storagePoolTags = getStoragePoolTags(destPool); @@ -3511,18 +3588,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return result; } - public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) { - String[] oldDOStorageTags = oldDO.getTagsArray(); - String[] newDOStorageTags = newDO.getTagsArray(); - if (oldDOStorageTags.length == 0) { - return true; - } - if (newDOStorageTags.length == 0) { - return false; - } - return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags)); - } - /** * Returns a {@link Pair}, where the first value is the list of the StoragePool tags, and the second value is whether the returned tags are to be interpreted as a rule, * or a normal list of tags. @@ -3727,7 +3792,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic private boolean isOperationSupported(VMTemplateVO template, UserVmVO userVm) { if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && - (userVm == null || !UserVmManager.CKS_NODE.equals(userVm.getUserVmType()))) { + (userVm == null || !UserVmManager.CKS_NODE.equals(userVm.getUserVmType()) || !UserVmManager.SHAREDFSVM.equals(userVm.getUserVmType()))) { return false; } return true; @@ -4263,7 +4328,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } } else if (storeForNewStoreScope.getScopeType() == ScopeType.HOST && (storeForExistingStoreScope.getScopeType() == ScopeType.CLUSTER || storeForExistingStoreScope.getScopeType() == ScopeType.ZONE)) { - Long hostId = _vmInstanceDao.findById(existingVolume.getInstanceId()).getHostId(); + VMInstanceVO vm = _vmInstanceDao.findById(existingVolume.getInstanceId()); + Long hostId = vm.getHostId(); + 
if (hostId == null) { + hostId = vm.getLastHostId(); + } if (storeForNewStoreScope.getScopeId().equals(hostId)) { return false; } diff --git a/server/src/main/java/com/cloud/storage/listener/SnapshotStateListener.java b/server/src/main/java/com/cloud/storage/listener/SnapshotStateListener.java index 0dd7e77ac33..18ab4168c24 100644 --- a/server/src/main/java/com/cloud/storage/listener/SnapshotStateListener.java +++ b/server/src/main/java/com/cloud/storage/listener/SnapshotStateListener.java @@ -26,11 +26,9 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.events.EventBus; -import org.apache.cloudstack.framework.events.EventBusException; -import org.apache.logging.log4j.Logger; +import org.apache.cloudstack.framework.events.EventDistributor; import org.apache.logging.log4j.LogManager; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -47,12 +45,12 @@ import com.cloud.utils.fsm.StateMachine2; @Component public class SnapshotStateListener implements StateListener { - protected static EventBus s_eventBus = null; protected static ConfigurationDao s_configDao; @Inject private ConfigurationDao configDao; + private EventDistributor eventDistributor = null; protected Logger logger = LogManager.getLogger(getClass()); public SnapshotStateListener() { @@ -64,6 +62,10 @@ public class SnapshotStateListener implements StateListener eventDescription = new HashMap(); + Map eventDescription = new HashMap<>(); eventDescription.put("resource", resourceName); eventDescription.put("id", vo.getUuid()); eventDescription.put("old-state", oldState.name()); @@ -104,11 +104,7 @@ public class SnapshotStateListener implements StateListener { - protected static EventBus s_eventBus = null; protected ConfigurationDao 
_configDao; protected VMInstanceDao _vmInstanceDao; + private EventDistributor eventDistributor; protected Logger logger = LogManager.getLogger(getClass()); public VolumeStateListener(ConfigurationDao configDao, VMInstanceDao vmInstanceDao) { @@ -58,6 +55,10 @@ public class VolumeStateListener implements StateListener this._vmInstanceDao = vmInstanceDao; } + public void setEventDistributor(EventDistributor eventDistributor) { + this.eventDistributor = eventDistributor; + } + @Override public boolean preStateTransitionEvent(State oldState, Event event, State newState, Volume vo, boolean status, Object opaque) { pubishOnEventBus(event.name(), "preStateTransitionEvent", vo, oldState, newState); @@ -93,23 +94,21 @@ public class VolumeStateListener implements StateListener return true; } - private void pubishOnEventBus(String event, String status, Volume vo, State oldState, State newState) { + private void pubishOnEventBus(String event, String status, Volume vo, State oldState, State newState) { String configKey = Config.PublishResourceStateEvent.key(); String value = _configDao.getValue(configKey); boolean configValue = Boolean.parseBoolean(value); if(!configValue) return; - try { - s_eventBus = ComponentContext.getComponent(EventBus.class); - } catch (NoSuchBeanDefinitionException nbe) { - return; // no provider is configured to provide events bus, so just return + if (eventDistributor == null) { + setEventDistributor(ComponentContext.getComponent(EventDistributor.class)); } String resourceName = getEntityFromClassName(Volume.class.getName()); org.apache.cloudstack.framework.events.Event eventMsg = new org.apache.cloudstack.framework.events.Event(ManagementService.Name, EventCategory.RESOURCE_STATE_CHANGE_EVENT.getName(), event, resourceName, - vo.getUuid()); + vo.getUuid()); Map eventDescription = new HashMap(); eventDescription.put("resource", resourceName); eventDescription.put("id", vo.getUuid()); @@ -120,11 +119,7 @@ public class VolumeStateListener implements 
StateListener eventDescription.put("eventDateTime", eventDate); eventMsg.setDescription(eventDescription); - try { - s_eventBus.publish(eventMsg); - } catch (EventBusException e) { - logger.warn("Failed to state change event on the event bus."); - } + eventDistributor.publish(eventMsg); } private String getEntityFromClassName(String entityClassName) { diff --git a/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageVmManager.java b/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageVmManager.java index b534fc39e66..a34658a7f6d 100644 --- a/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageVmManager.java +++ b/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageVmManager.java @@ -17,16 +17,24 @@ package com.cloud.storage.secondary; import java.util.List; +import java.util.Map; import org.apache.cloudstack.framework.config.ConfigKey; import com.cloud.agent.api.Command; import com.cloud.agent.api.StartupCommand; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.HostVO; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.vm.SecondaryStorageVm; import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; public interface SecondaryStorageVmManager extends Manager { @@ -47,6 +55,10 @@ public interface SecondaryStorageVmManager extends Manager { public SecondaryStorageVmVO startSecStorageVm(long ssVmVmId); + void startSecStorageVmForHA(VirtualMachine vm, Map params, + DeploymentPlanner planner) throws InsufficientCapacityException, ResourceUnavailableException, + ConcurrentOperationException, OperationTimedoutException; + public boolean stopSecStorageVm(long ssVmVmId); public 
boolean rebootSecStorageVm(long ssVmVmId); diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 56981cfe55c..50c8ff8b83a 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -33,6 +33,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.SecurityChecker; +import com.cloud.api.ApiDBUtils; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -40,6 +41,7 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.user.snapshot.CopySnapshotCmd; import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; +import org.apache.cloudstack.api.command.user.snapshot.ExtractSnapshotCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; import org.apache.cloudstack.api.command.user.snapshot.UpdateSnapshotPolicyCmd; @@ -72,10 +74,12 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; +import 
org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.builder.ReflectionToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; import org.springframework.stereotype.Component; @@ -466,6 +470,74 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement return snapshot; } + @Override + @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_EXTRACT, eventDescription = "extracting snapshot", async = true) + public String extractSnapshot(ExtractSnapshotCmd cmd) { + Account caller = CallContext.current().getCallingAccount(); + Long snapshotId = cmd.getId(); + Long zoneId = cmd.getZoneId(); + + if (!_accountMgr.isRootAdmin(caller.getId()) && ApiDBUtils.isExtractionDisabled()) { + logger.error("Extraction is disabled through [{}].", Config.DisableExtraction); + throw new PermissionDeniedException("Extraction could not be completed."); + } + + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); + if (snapshot == null || snapshot.getRemoved() != null) { + logger.error("Unable to find active [{}].", snapshot); + throw new InvalidParameterValueException("Unable to find active snapshot."); + } + + if (zoneId != null && dataCenterDao.findById(zoneId) == null) { + logger.error("Invalid zone id [{}].", zoneId); + throw new IllegalArgumentException("Please specify a valid zone."); + } + + _accountMgr.checkAccess(caller, null, true, snapshot); + + List imageStores = dataStoreMgr.getImageStoresByScope(new ZoneScope(zoneId)); + + if (CollectionUtils.isEmpty(imageStores)) { + logger.error("Could not find any zone storages."); + throw new InvalidParameterValueException("Extraction could not be completed"); + } + + SnapshotDataStoreVO snapshotDataStoreReference = null; + ImageStoreEntity chosenStore = null; + + for (DataStore store : imageStores) { + snapshotDataStoreReference = _snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Image, store.getId(), snapshotId); + if (snapshotDataStoreReference == null) { + 
logger.trace("Snapshot [{}] not in store [{}].", snapshotId, store.getId()); + continue; + } + String existingExtractUrl = snapshotDataStoreReference.getExtractUrl(); + if (existingExtractUrl != null) { + logger.debug("Extract URL already exists: [{}].", existingExtractUrl); + return existingExtractUrl; + } + chosenStore = (ImageStoreEntity) store; + logger.debug("Snapshot [{}] found in store [{}].", snapshotId, chosenStore.getId()); + break; + } + + if (ObjectUtils.anyNull(chosenStore, snapshotDataStoreReference)) { + logger.error("Snapshot [{}] not found in any secondary storage.", snapshotId); + throw new InvalidParameterValueException("Snapshot not found."); + } + + snapshotSrv.syncVolumeSnapshotsToRegionStore(snapshot.getVolumeId(), chosenStore); + + SnapshotInfo snapshotObject = snapshotFactory.getSnapshot(snapshotId, chosenStore); + String extractUrl = chosenStore.createEntityExtractUrl(snapshotObject.getPath(), snapshotObject.getBaseVolume().getFormat(), snapshotObject); + logger.debug("Extract URL [{}] created for snapshot [{}].", extractUrl, snapshot); + snapshotDataStoreReference.setExtractUrl(extractUrl); + snapshotDataStoreReference.setExtractUrlCreated(DateUtil.now()); + _snapshotStoreDao.update(snapshotDataStoreReference.getId(), snapshotDataStoreReference); + + return extractUrl; + } + @Override public Snapshot archiveSnapshot(Long snapshotId) { SnapshotInfo snapshotOnPrimary = snapshotFactory.getSnapshotOnPrimaryStore(snapshotId); @@ -983,7 +1055,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement if (instanceId != null) { userVmVO = _vmDao.findById(instanceId); } - if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()))) { + if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()) || 
!UserVmManager.SHAREDFSVM.equals(userVmVO.getUserVmType()))) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java index 29955066062..2a53021636c 100644 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java @@ -35,6 +35,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobDispatcher; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.dao.AsyncJobDao; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; +import org.apache.cloudstack.jobs.JobInfo; import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.springframework.stereotype.Component; @@ -47,7 +48,6 @@ import com.cloud.server.TaggedResourceService; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotScheduleVO; -import com.cloud.storage.SnapshotVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotPolicyDao; @@ -64,7 +64,6 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.TestClock; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; -import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; import com.cloud.vm.snapshot.VMSnapshotManager; import com.cloud.vm.snapshot.VMSnapshotVO; @@ -144,7 +143,7 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu try { if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { try { - checkStatusOfCurrentlyExecutingSnapshots(); + scheduleNextSnapshotJobsIfNecessary(); } finally { 
scanLock.unlock(); } @@ -174,70 +173,39 @@ public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu } } - private void checkStatusOfCurrentlyExecutingSnapshots() { - final SearchCriteria sc = _snapshotScheduleDao.createSearchCriteria(); - sc.addAnd("asyncJobId", SearchCriteria.Op.NNULL); - final List snapshotSchedules = _snapshotScheduleDao.search(sc, null); - for (final SnapshotScheduleVO snapshotSchedule : snapshotSchedules) { - final Long asyncJobId = snapshotSchedule.getAsyncJobId(); - final AsyncJobVO asyncJob = _asyncJobDao.findByIdIncludingRemoved(asyncJobId); - switch (asyncJob.getStatus()) { - case SUCCEEDED: - // The snapshot has been successfully backed up. - // The snapshot state has also been cleaned up. - // We can schedule the next job for this snapshot. - // Remove the existing entry in the snapshot_schedule table. - scheduleNextSnapshotJob(snapshotSchedule); - break; - case FAILED: - // Check the snapshot status. - final Long snapshotId = snapshotSchedule.getSnapshotId(); - if (snapshotId == null) { - // createSnapshotAsync exited, successfully or unsuccessfully, - // even before creating a snapshot record - // No cleanup needs to be done. - // Schedule the next snapshot. - scheduleNextSnapshotJob(snapshotSchedule); - } else { - final SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snapshot == null || snapshot.getRemoved() != null) { - // This snapshot has been deleted successfully from the primary storage - // Again no cleanup needs to be done. - // Schedule the next snapshot. - // There's very little probability that the code reaches this point. - // The snapshotId is a foreign key for the snapshot_schedule table - // set to ON DELETE CASCADE. So if the snapshot entry is deleted, the snapshot_schedule entry will be too. - // But what if it has only been marked as removed? 
- scheduleNextSnapshotJob(snapshotSchedule); - } else { - // The management server executing this snapshot job appears to have crashed - // while creating the snapshot on primary storage/or backing it up. - // We have no idea whether the snapshot was successfully taken on the primary or not. - // Schedule the next snapshot job. - // The ValidatePreviousSnapshotCommand will take appropriate action on this snapshot - // If the snapshot was taken successfully on primary, it will retry backing it up. - // and cleanup the previous snapshot - // Set the userId to that of system. - //_snapshotManager.validateSnapshot(1L, snapshot); - // In all cases, schedule the next snapshot job - scheduleNextSnapshotJob(snapshotSchedule); - } - } - - break; - case IN_PROGRESS: - // There is no way of knowing from here whether - // 1) Another management server is processing this snapshot job - // 2) The management server has crashed and this snapshot is lying - // around in an inconsistent state. - // Hopefully, this can be resolved at the backend when the current snapshot gets executed. - // But if it remains in this state, the current snapshot will not get executed. - // And it will remain in stasis. - break; - } + private void scheduleNextSnapshotJobsIfNecessary() { + List snapshotSchedules = _snapshotScheduleDao.getSchedulesAssignedWithAsyncJob(); + logger.info("Verifying the current state of [{}] snapshot schedules and scheduling next jobs, if necessary.", snapshotSchedules.size()); + for (SnapshotScheduleVO snapshotSchedule : snapshotSchedules) { + scheduleNextSnapshotJobIfNecessary(snapshotSchedule); } } + protected void scheduleNextSnapshotJobIfNecessary(SnapshotScheduleVO snapshotSchedule) { + Long asyncJobId = snapshotSchedule.getAsyncJobId(); + AsyncJobVO asyncJob = _asyncJobDao.findByIdIncludingRemoved(asyncJobId); + + if (asyncJob == null) { + logger.debug("The async job [{}] of snapshot schedule [{}] does not exist anymore. 
Considering it as finished and scheduling the next snapshot job.", + asyncJobId, snapshotSchedule); + scheduleNextSnapshotJob(snapshotSchedule); + return; + } + + JobInfo.Status status = asyncJob.getStatus(); + + if (JobInfo.Status.SUCCEEDED.equals(status)) { + logger.debug("Last job of schedule [{}] succeeded; scheduling the next snapshot job.", snapshotSchedule); + } else if (JobInfo.Status.FAILED.equals(status)) { + logger.debug("Last job of schedule [{}] failed with [{}]; scheduling a new snapshot job.", snapshotSchedule, asyncJob.getResult()); + } else { + logger.debug("Schedule [{}] is still in progress, skipping next job scheduling.", snapshotSchedule); + return; + } + + scheduleNextSnapshotJob(snapshotSchedule); + } + @DB protected void deleteExpiredVMSnapshots() { Date now = new Date(); diff --git a/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java index 76a724a428e..6b503ec3a50 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java @@ -259,7 +259,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { // Create Symlink at ssvm String path = vmTemplateHost.getInstallPath(); String uuid = UUID.randomUUID().toString() + "." + template.getFormat().getFileExtension(); // adding "." + vhd/ova... etc. - CreateEntityDownloadURLCommand cmd = new CreateEntityDownloadURLCommand(((ImageStoreEntity)store).getMountPoint(), path, uuid, null); + CreateEntityDownloadURLCommand cmd = new CreateEntityDownloadURLCommand(((ImageStoreEntity)store).getMountPoint(), path, uuid, null, null); Answer ans = ep.sendMessage(cmd); if (ans == null || !ans.getResult()) { errorString = "Unable to create a link for " + type + " id:" + template.getId() + "," + (ans == null ? 
"" : ans.getDetails()); @@ -315,7 +315,7 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { throw new CloudRuntimeException(errorString); } - CreateEntityDownloadURLCommand cmd = new CreateEntityDownloadURLCommand(((ImageStoreEntity)secStore).getMountPoint(), path, uuid, null); + CreateEntityDownloadURLCommand cmd = new CreateEntityDownloadURLCommand(((ImageStoreEntity)secStore).getMountPoint(), path, uuid, null, null); Answer ans = ep.sendMessage(cmd); if (ans == null || !ans.getResult()) { errorString = "Unable to create a link for " + type + " id:" + entityId + "," + (ans == null ? "" : ans.getDetails()); diff --git a/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java b/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java index 086038cf1c7..769aa3dc1f2 100644 --- a/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java +++ b/server/src/main/java/com/cloud/storage/upload/params/TemplateUploadParams.java @@ -16,13 +16,14 @@ // under the License. 
package com.cloud.storage.upload.params; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor; import java.util.Map; public class TemplateUploadParams extends UploadParamsBase { - public TemplateUploadParams(long userId, String name, String displayText, + public TemplateUploadParams(long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, @@ -30,7 +31,7 @@ public class TemplateUploadParams extends UploadParamsBase { String templateTag, long templateOwnerId, Map details, Boolean sshkeyEnabled, Boolean isDynamicallyScalable, Boolean isRoutingType, boolean deployAsIs, boolean forCks) { - super(userId, name, displayText, bits, passwordEnabled, requiresHVM, isPublic, featured, isExtractable, + super(userId, name, displayText, arch, bits, passwordEnabled, requiresHVM, isPublic, featured, isExtractable, format, guestOSId, zoneId, hypervisorType, chksum, templateTag, templateOwnerId, details, sshkeyEnabled, isDynamicallyScalable, isRoutingType, deployAsIs, forCks); setBootable(true); diff --git a/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java b/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java index be8319c9e57..7be526d780d 100644 --- a/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java +++ b/server/src/main/java/com/cloud/storage/upload/params/UploadParams.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.storage.upload.params; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor; import java.util.Map; @@ -47,4 +48,5 @@ public interface UploadParams { boolean isRoutingType(); boolean isDirectDownload(); boolean isDeployAsIs(); + CPU.CPUArch getArch(); } diff --git a/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java b/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java index 11fec78d8e7..c3499d75c3b 100644 --- a/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java +++ b/server/src/main/java/com/cloud/storage/upload/params/UploadParamsBase.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.storage.upload.params; +import com.cloud.cpu.CPU; import com.cloud.hypervisor.Hypervisor; import java.util.Map; @@ -46,8 +47,9 @@ public abstract class UploadParamsBase implements UploadParams { private boolean isRoutingType; private boolean deployAsIs; private boolean forCks; + private CPU.CPUArch arch; - UploadParamsBase(long userId, String name, String displayText, + UploadParamsBase(long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, boolean passwordEnabled, boolean requiresHVM, boolean isPublic, boolean featured, boolean isExtractable, String format, Long guestOSId, @@ -58,6 +60,7 @@ public abstract class UploadParamsBase implements UploadParams { this.userId = userId; this.name = name; this.displayText = displayText; + this.arch = arch; this.bits = bits; this.passwordEnabled = passwordEnabled; this.requiresHVM = requiresHVM; @@ -249,4 +252,13 @@ public abstract class UploadParamsBase implements UploadParams { void setHypervisorType(Hypervisor.HypervisorType hypervisorType) { this.hypervisorType = hypervisorType; } + + @Override + public CPU.CPUArch getArch() { + return arch; + } + + public void setArch(CPU.CPUArch arch) { + this.arch = arch; + } } diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java 
b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 99ef3a95092..8cd27e6c977 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -35,12 +35,14 @@ import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer; import org.apache.cloudstack.agent.directdownload.CheckUrlCommand; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; +import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.direct.download.DirectDownloadManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -202,7 +204,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException { TemplateProfile profile = super.prepare(cmd); String url = profile.getUrl(); - UriUtils.validateUrl(ImageFormat.ISO.getFileExtension(), url); + UriUtils.validateUrl(ImageFormat.ISO.getFileExtension(), url, !TemplateManager.getValidateUrlIsResolvableBeforeRegisteringTemplateValue(), false); boolean followRedirects = StorageManager.DataStoreDownloadFollowRedirects.value(); if (cmd.isDirectDownload()) { DigestHelper.validateChecksumString(cmd.getChecksum()); @@ -236,7 +238,7 @@ public class HypervisorTemplateAdapter extends 
TemplateAdapterBase { public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException { TemplateProfile profile = super.prepare(cmd); String url = profile.getUrl(); - UriUtils.validateUrl(cmd.getFormat(), url, cmd.isDirectDownload()); + UriUtils.validateUrl(cmd.getFormat(), url, !TemplateManager.getValidateUrlIsResolvableBeforeRegisteringTemplateValue(), cmd.isDirectDownload()); Hypervisor.HypervisorType hypervisor = Hypervisor.HypervisorType.getType(cmd.getHypervisor()); boolean followRedirects = StorageManager.DataStoreDownloadFollowRedirects.value(); if (cmd.isDirectDownload()) { @@ -420,6 +422,16 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { if (zoneIdList.size() > 1) throw new CloudRuntimeException("Operation is not supported for more than one zone id at a time."); + // Set Event Details for Template/ISO Upload + String eventType = template.getFormat().equals(ImageFormat.ISO) ? "Iso" : "Template"; + String eventResourceId = template.getUuid(); + CallContext.current().setEventDetails(String.format("%s Id: %s", eventType, eventResourceId)); + CallContext.current().putContextParameter(eventType.equals("Iso") ? 
eventType : VirtualMachineTemplate.class, eventResourceId); + if (template.getFormat().equals(ImageFormat.ISO)) { + CallContext.current().setEventResourceType(ApiCommandResourceType.Iso); + CallContext.current().setEventResourceId(template.getId()); + } + Long zoneId = zoneIdList.get(0); DataStore imageStore = verifyHeuristicRulesForZone(template, zoneId); List payloads = new LinkedList<>(); @@ -753,8 +765,8 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { public TemplateProfile prepareDelete(DeleteTemplateCmd cmd) { TemplateProfile profile = super.prepareDelete(cmd); VMTemplateVO template = profile.getTemplate(); - if (template.getTemplateType() == TemplateType.SYSTEM) { - throw new InvalidParameterValueException("The DomR template cannot be deleted."); + if (template.getTemplateType() == TemplateType.SYSTEM && !cmd.getIsSystem()) { + throw new InvalidParameterValueException("Could not delete template as it is a SYSTEM template and isSystem is set to false."); } checkZoneImageStores(profile.getTemplate(), profile.getZoneIdList()); return profile; diff --git a/server/src/main/java/com/cloud/template/TemplateAdapter.java b/server/src/main/java/com/cloud/template/TemplateAdapter.java index 7962f552251..32a8db515aa 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapter.java @@ -19,6 +19,7 @@ package com.cloud.template; import java.util.List; import java.util.Map; +import com.cloud.cpu.CPU; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; @@ -72,11 +73,11 @@ public interface TemplateAdapter extends Adapter { boolean delete(TemplateProfile profile); - TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean 
isPublic, + TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName, Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException; - TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, + TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, TemplateType templateType, boolean directDownload, boolean deployAsIs, boolean forCks) throws ResourceAllocationException; diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index a46fe0a5412..b99cb730c34 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -24,6 +24,7 @@ import java.util.Map; import javax.inject.Inject; +import com.cloud.cpu.CPU; import com.cloud.deployasis.DeployAsIsConstants; import com.cloud.storage.upload.params.IsoUploadParams; import com.cloud.storage.upload.params.TemplateUploadParams; @@ -130,18 +131,18 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat } @Override - public TemplateProfile 
prepare(boolean isIso, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName, - Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException { - return prepare(isIso, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, format, guestOSId, zoneId, + public TemplateProfile prepare(boolean isIso, Long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, + Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneId, HypervisorType hypervisorType, String accountName, + Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException { + return prepare(isIso, userId, name, displayText, arch, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, format, guestOSId, zoneId, hypervisorType, chksum, bootable, null, null, details, false, null, false, TemplateType.USER, directDownload, deployAsIs, false); } @Override - public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, - Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneIdList, HypervisorType hypervisorType, String chksum, - Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, - TemplateType templateType, boolean directDownload, boolean deployAsIs, boolean forCks) throws ResourceAllocationException { + public 
TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, CPU.CPUArch arch, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, + Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List zoneIdList, HypervisorType hypervisorType, String chksum, + Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable, + TemplateType templateType, boolean directDownload, boolean deployAsIs, boolean forCks) throws ResourceAllocationException { //Long accountId = null; // parameters verification @@ -262,7 +263,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat Long id = _tmpltDao.getNextInSequence(Long.class, "id"); CallContext.current().setEventDetails("Id: " + id + " name: " + name); - TemplateProfile profile = new TemplateProfile(id, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, imgfmt, guestOSId, zoneIdList, + TemplateProfile profile = new TemplateProfile(id, userId, name, displayText, arch, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, imgfmt, guestOSId, zoneIdList, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details, sshkeyEnabled, null, isDynamicallyScalable, templateType, directDownload, deployAsIs); profile.setForCks(forCks); @@ -308,7 +309,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat } } } - return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), + return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getArch(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), 
cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId, hypervisorType, cmd.getChecksum(), true, cmd.getTemplateTag(), owner, details, cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), templateType, cmd.isDirectDownload(), cmd.isDeployAsIs(), cmd.isForCks()); @@ -339,7 +340,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat StringUtils.join(Arrays.stream(HypervisorType.values()).filter(h -> h != HypervisorType.None).map(HypervisorType::name).toArray(), ", "))); } - return prepare(params.isIso(), params.getUserId(), params.getName(), params.getDisplayText(), params.getBits(), + return prepare(params.isIso(), params.getUserId(), params.getName(), params.getDisplayText(), params.getArch(), params.getBits(), params.isPasswordEnabled(), params.requiresHVM(), params.getUrl(), params.isPublic(), params.isFeatured(), params.isExtractable(), params.getFormat(), params.getGuestOSId(), zoneList, params.getHypervisorType(), params.getChecksum(), params.isBootable(), params.getTemplateTag(), owner, @@ -360,7 +361,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat osTypeId = getDefaultDeployAsIsGuestOsId(); } UploadParams params = new TemplateUploadParams(CallContext.current().getCallingUserId(), cmd.getName(), - cmd.getDisplayText(), cmd.getBits(), BooleanUtils.toBoolean(cmd.isPasswordEnabled()), + cmd.getDisplayText(), cmd.getArch(), cmd.getBits(), BooleanUtils.toBoolean(cmd.isPasswordEnabled()), BooleanUtils.toBoolean(cmd.getRequiresHvm()), BooleanUtils.toBoolean(cmd.isPublic()), BooleanUtils.toBoolean(cmd.isFeatured()), BooleanUtils.toBoolean(cmd.isExtractable()), cmd.getFormat(), osTypeId, cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()), cmd.getChecksum(), @@ -394,7 +395,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat zoneList.add(zoneId); } - return prepare(true, 
CallContext.current().getCallingUserId(), cmd.getIsoName(), cmd.getDisplayText(), 64, cmd.isPasswordEnabled(), true, cmd.getUrl(), cmd.isPublic(), + return prepare(true, CallContext.current().getCallingUserId(), cmd.getIsoName(), cmd.getDisplayText(), cmd.getArch(), 64, cmd.isPasswordEnabled(), true, cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), ImageFormat.ISO.toString(), cmd.getOsTypeId(), zoneList, HypervisorType.None, cmd.getChecksum(), cmd.isBootable(), null, owner, null, false, cmd.getImageStoreUuid(), cmd.isDynamicallyScalable(), TemplateType.USER, cmd.isDirectDownload(), false, false); } @@ -405,7 +406,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat new VMTemplateVO(profile.getTemplateId(), profile.getName(), profile.getFormat(), profile.isPublic(), profile.isFeatured(), profile.isExtractable(), profile.getTemplateType(), profile.getUrl(), profile.isRequiresHVM(), profile.getBits(), profile.getAccountId(), profile.getCheckSum(), profile.getDisplayText(), profile.isPasswordEnabled(), profile.getGuestOsId(), profile.isBootable(), profile.getHypervisorType(), - profile.getTemplateTag(), profile.getDetails(), profile.isSshKeyEnabled(), profile.IsDynamicallyScalable(), profile.isDirectDownload(), profile.isDeployAsIs()); + profile.getTemplateTag(), profile.getDetails(), profile.isSshKeyEnabled(), profile.IsDynamicallyScalable(), profile.isDirectDownload(), profile.isDeployAsIs(), profile.getArch()); template.setState(initialState); template.setForCks(profile.isForCks()); diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 7c15c8e369f..30af960721d 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -34,6 +34,7 @@ import java.util.stream.Collectors; import javax.inject.Inject; import 
javax.naming.ConfigurationException; +import com.cloud.cpu.CPU; import com.cloud.vm.VirtualMachine; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ApiConstants; @@ -189,6 +190,7 @@ import com.cloud.user.AccountManager; import com.cloud.user.AccountService; import com.cloud.user.AccountVO; import com.cloud.user.ResourceLimitService; +import com.cloud.user.User; import com.cloud.user.UserData; import com.cloud.user.dao.AccountDao; import com.cloud.uservm.UserVm; @@ -207,6 +209,7 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine.State; @@ -298,7 +301,6 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Inject private HypervisorGuruManager _hvGuruMgr; - private boolean _disableExtraction = false; private List _adapters; ExecutorService _preloadExecutor; @@ -539,7 +541,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (isISO) { desc = Upload.Type.ISO.toString(); } - if (!_accountMgr.isRootAdmin(caller.getId()) && _disableExtraction) { + if (!_accountMgr.isRootAdmin(caller.getId()) && ApiDBUtils.isExtractionDisabled()) { throw new PermissionDeniedException("Extraction has been disabled by admin"); } @@ -1112,10 +1114,6 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Override public boolean configure(String name, Map params) throws ConfigurationException { - - String disableExtraction = _configDao.getValue(Config.DisableExtraction.toString()); - _disableExtraction = (disableExtraction == null) ? 
false : Boolean.parseBoolean(disableExtraction); - _preloadExecutor = Executors.newFixedThreadPool(TemplatePreloaderPoolSize.value(), new NamedThreadFactory("Template-Preloader")); return true; @@ -1200,6 +1198,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new InvalidParameterValueException("Unable to find a virtual machine with id " + vmId); } } + if (vm instanceof UserVm && UserVmManager.SHAREDFSVM.equals(((UserVm) vm).getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } VMTemplateVO iso = _tmpltDao.findById(isoId); if (iso == null || iso.getRemoved() != null) { @@ -1457,6 +1458,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // Input validation final Long id = cmd.getId(); final Account caller = CallContext.current().getCallingAccount(); + final User user = CallContext.current().getCallingUser(); List accountNames = cmd.getAccountNames(); List projectIds = cmd.getProjectIds(); Boolean isFeatured = cmd.isFeatured(); @@ -1526,9 +1528,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } if (owner.getType() == Account.Type.PROJECT) { - // Currently project owned templates cannot be shared outside project but is available to all users within project by default. - throw new InvalidParameterValueException("Update template permissions is an invalid operation on template " + template.getName() + - ". Project owned templates cannot be shared outside template."); + // if it is a project owned template/iso, the user must at least have access to be allowed to share it. 
+ _accountMgr.checkAccess(user, template); } // check configuration parameter(allow.public.user.templates) value for @@ -1929,9 +1930,13 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, String description = cmd.getDisplayText(); boolean isExtractable = false; Long sourceTemplateId = null; + CPU.CPUArch arch = CPU.CPUArch.amd64; if (volume != null) { VMTemplateVO template = ApiDBUtils.findTemplateById(volume.getTemplateId()); isExtractable = template != null && template.isExtractable() && template.getTemplateType() != Storage.TemplateType.SYSTEM; + if (template != null) { + arch = template.getArch(); + } if (volume.getIsoId() != null && volume.getIsoId() != 0) { sourceTemplateId = volume.getIsoId(); } else if (volume.getTemplateId() != null) { @@ -1946,7 +1951,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } privateTemplate = new VMTemplateVO(nextTemplateId, name, ImageFormat.RAW, isPublic, featured, isExtractable, TemplateType.USER, null, requiresHvmValue, bitsValue, templateOwner.getId(), null, description, - passwordEnabledValue, guestOS.getId(), true, hyperType, templateTag, cmd.getDetails(), sshKeyEnabledValue, isDynamicScalingEnabled, false, false); + passwordEnabledValue, guestOS.getId(), true, hyperType, templateTag, cmd.getDetails(), sshKeyEnabledValue, isDynamicScalingEnabled, false, false, arch); if (sourceTemplateId != null) { if (logger.isDebugEnabled()) { @@ -2125,6 +2130,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, Account account = CallContext.current().getCallingAccount(); boolean cleanupDetails = cmd.isCleanupDetails(); boolean forCks = cmd instanceof UpdateTemplateCmd && ((UpdateTemplateCmd) cmd).getForCks(); + CPU.CPUArch arch = cmd.getCPUArch(); // verify that template exists VMTemplateVO template = _tmpltDao.findById(id); @@ -2173,6 +2179,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, 
isRoutingTemplate == null && templateType == null && templateTag == null && + arch == null && (! cleanupDetails && details == null) //update details in every case except this one ); if (!updateNeeded) { @@ -2247,6 +2254,10 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, template.setDynamicallyScalable(isDynamicallyScalable); } + if (arch != null) { + template.setArch(arch); + } + if (isRoutingTemplate != null) { if (isRoutingTemplate) { template.setTemplateType(TemplateType.ROUTING); @@ -2366,7 +2377,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {AllowPublicUserTemplates, TemplatePreloaderPoolSize}; + return new ConfigKey[] {AllowPublicUserTemplates, TemplatePreloaderPoolSize, ValidateUrlIsResolvableBeforeRegisteringTemplate}; } public List getTemplateAdapters() { diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index 170ef1fdbbc..421d2587441 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -31,7 +31,6 @@ import com.cloud.utils.DateUtil; import org.apache.cloudstack.api.command.admin.usage.GenerateUsageRecordsCmd; import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd; import org.apache.cloudstack.api.command.admin.usage.RemoveRawUsageRecordsCmd; -import org.apache.cloudstack.api.response.UsageTypeResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.usage.Usage; @@ -485,10 +484,4 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag } return true; } - - @Override - public List listUsageTypes() { - return UsageTypes.listUsageTypes(); - } - } diff --git 
a/server/src/main/java/com/cloud/user/AccountManager.java b/server/src/main/java/com/cloud/user/AccountManager.java index 6d2d1db5668..1e5526688b7 100644 --- a/server/src/main/java/com/cloud/user/AccountManager.java +++ b/server/src/main/java/com/cloud/user/AccountManager.java @@ -199,4 +199,8 @@ public interface AccountManager extends AccountService, Configurable { UserTwoFactorAuthenticationSetupResponse setupUserTwoFactorAuthentication(SetupUserTwoFactorAuthenticationCmd cmd); List getApiNameList(); + + void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO user, String currentPassword, boolean skipCurrentPassValidation); + + void checkApiAccess(Account caller, String command); } diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index d1894b8fc47..78234497cd0 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -73,15 +73,18 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.cloudstack.region.gslb.GlobalLoadBalancerRuleDao; import org.apache.cloudstack.resourcedetail.UserDetailVO; import org.apache.cloudstack.resourcedetail.dao.UserDetailsDao; import org.apache.cloudstack.utils.baremetal.BaremetalUtils; +import org.apache.cloudstack.webhook.WebhookHelper; import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import com.cloud.api.ApiDBUtils; 
import com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; @@ -168,6 +171,7 @@ import com.cloud.utils.ConstantTimeComparator; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.component.PluggableService; @@ -317,6 +321,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M private IpAddressManager _ipAddrMgr; @Inject private RoleService roleService; + @Inject + private RoutedIpv4Manager routedIpv4Manager; @Inject private PasswordPolicy passwordPolicy; @@ -330,6 +336,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M private List _securityCheckers; private int _cleanupInterval; + private static final String OAUTH2_PROVIDER_NAME = "oauth2"; private List apiNameList; protected static Map userTwoFactorAuthenticationProvidersMap = new HashMap<>(); @@ -425,6 +432,15 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M _querySelectors = querySelectors; } + protected void deleteWebhooksForAccount(long accountId) { + try { + WebhookHelper webhookService = ComponentContext.getDelegateComponentOfType(WebhookHelper.class); + webhookService.deleteWebhooksForAccount(accountId); + } catch (NoSuchBeanDefinitionException ignored) { + logger.debug("No WebhookHelper bean found"); + } + } + @Override public List getApiNameList() { return apiNameList; @@ -1054,6 +1070,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } + // remove dedicated IPv4 subnets + routedIpv4Manager.removeIpv4SubnetsForZoneByAccountId(accountId); + + // remove dedicated BGP peers + routedIpv4Manager.removeBgpPeersByAccountId(accountId); + // release account specific guest vlans List maps = _accountGuestVlanMapDao.listAccountGuestVlanMapsByAccount(accountId); for 
(AccountGuestVlanMapVO map : maps) { @@ -1104,6 +1126,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // Delete registered UserData userDataDao.removeByAccountId(accountId); + // Delete Webhooks + deleteWebhooksForAccount(accountId); + return true; } catch (Exception ex) { logger.warn("Failed to cleanup account " + account + " due to ", ex); @@ -1353,6 +1378,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } + @Override + public void checkApiAccess(Account caller, String command) { + List apiCheckers = getEnabledApiCheckers(); + checkApiAccess(apiCheckers, caller, command); + } + @NotNull private List getEnabledApiCheckers() { // we are really only interested in the dynamic access checker @@ -1424,7 +1455,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M validateAndUpdateLastNameIfNeeded(updateUserCmd, user); validateAndUpdateUsernameIfNeeded(updateUserCmd, user, account); - validateUserPasswordAndUpdateIfNeeded(updateUserCmd.getPassword(), user, updateUserCmd.getCurrentPassword()); + validateUserPasswordAndUpdateIfNeeded(updateUserCmd.getPassword(), user, updateUserCmd.getCurrentPassword(), false); String email = updateUserCmd.getEmail(); if (StringUtils.isNotBlank(email)) { user.setEmail(email); @@ -1452,7 +1483,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M * * If all checks pass, we encode the given password with the most preferable password mechanism given in {@link #_userPasswordEncoders}. 
*/ - protected void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO user, String currentPassword) { + public void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO user, String currentPassword, boolean skipCurrentPassValidation) { if (newPassword == null) { logger.trace("No new password to update for user: " + user.getUuid()); return; @@ -1467,16 +1498,17 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M boolean isRootAdminExecutingPasswordUpdate = callingAccount.getId() == Account.ACCOUNT_ID_SYSTEM || isRootAdmin(callingAccount.getId()); boolean isDomainAdmin = isDomainAdmin(callingAccount.getId()); boolean isAdmin = isDomainAdmin || isRootAdminExecutingPasswordUpdate; + boolean skipValidation = isAdmin || skipCurrentPassValidation; if (isAdmin) { logger.trace(String.format("Admin account [uuid=%s] executing password update for user [%s] ", callingAccount.getUuid(), user.getUuid())); } - if (!isAdmin && StringUtils.isBlank(currentPassword)) { + if (!skipValidation && StringUtils.isBlank(currentPassword)) { throw new InvalidParameterValueException("To set a new password the current password must be provided."); } if (CollectionUtils.isEmpty(_userPasswordEncoders)) { throw new CloudRuntimeException("No user authenticators configured!"); } - if (!isAdmin) { + if (!skipValidation) { validateCurrentPassword(user, currentPassword); } UserAuthenticator userAuthenticator = _userPasswordEncoders.get(0); @@ -1820,7 +1852,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // If the user is a System user, return an error. We do not allow this AccountVO account = _accountDao.findById(accountId); - if (! isDeleteNeeded(account, accountId, caller)) { + if (caller.getId() == accountId) { + Domain domain = _domainDao.findById(account.getDomainId()); + throw new InvalidParameterValueException(String.format("Deletion of your own account is not allowed. 
To delete account %s (ID: %s, Domain: %s), " + + "request to another user with permissions to perform the operation.", + account.getAccountName(), account.getUuid(), domain.getUuid())); + } + + if (!isDeleteNeeded(account, accountId, caller)) { return true; } @@ -1840,7 +1879,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return deleteAccount(account, callerUserId, caller); } - private boolean isDeleteNeeded(AccountVO account, long accountId, Account caller) { + protected boolean isDeleteNeeded(AccountVO account, long accountId, Account caller) { if (account == null) { logger.info(String.format("The account, identified by id %d, doesn't exist", accountId )); return false; @@ -2660,7 +2699,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M continue; } } - if (secretCode != null && !authenticator.getName().equals("oauth2")) { + if ((secretCode != null && !authenticator.getName().equals(OAUTH2_PROVIDER_NAME)) + || (secretCode == null && authenticator.getName().equals(OAUTH2_PROVIDER_NAME))) { continue; } Pair result = authenticator.authenticate(username, password, domainId, requestParameters); @@ -2752,7 +2792,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M throw new InvalidParameterValueException("Unable to find user by id"); } final ControlledEntity account = getAccount(getUserAccountById(userId).getAccountId()); //Extracting the Account from the userID of the requested user. 
- checkAccess(CallContext.current().getCallingUser(), account); + User caller = CallContext.current().getCallingUser(); + preventRootDomainAdminAccessToRootAdminKeys(caller, account); + checkAccess(caller, account); Map keys = new HashMap(); keys.put("apikey", user.getApiKey()); @@ -2761,6 +2803,19 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return keys; } + protected void preventRootDomainAdminAccessToRootAdminKeys(User caller, ControlledEntity account) { + if (isDomainAdminForRootDomain(caller) && isRootAdmin(account.getAccountId())) { + String msg = String.format("Caller Username %s does not have access to root admin keys", caller.getUsername()); + logger.error(msg); + throw new PermissionDeniedException(msg); + } + } + + protected boolean isDomainAdminForRootDomain(User callingUser) { + AccountVO caller = _accountDao.findById(callingUser.getAccountId()); + return caller.getType() == Account.Type.DOMAIN_ADMIN && caller.getDomainId() == Domain.ROOT_DOMAIN; + } + @Override public List listUserTwoFactorAuthenticationProviders() { return userTwoFactorAuthenticationProviders; @@ -2795,6 +2850,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } Account account = _accountDao.findById(user.getAccountId()); + preventRootDomainAdminAccessToRootAdminKeys(user, account); checkAccess(caller, null, true, account); // don't allow updating system user diff --git a/server/src/main/java/com/cloud/user/DomainManagerImpl.java b/server/src/main/java/com/cloud/user/DomainManagerImpl.java index 51705e63f3a..4a81772d6d7 100644 --- a/server/src/main/java/com/cloud/user/DomainManagerImpl.java +++ b/server/src/main/java/com/cloud/user/DomainManagerImpl.java @@ -51,6 +51,7 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.messagebus.MessageBus; import 
org.apache.cloudstack.framework.messagebus.PublishScope; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.cloudstack.region.RegionManager; import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.commons.collections.CollectionUtils; @@ -161,6 +162,8 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom private ResourceLimitService resourceLimitService; @Inject private AffinityGroupDomainMapDao affinityGroupDomainMapDao; + @Inject + private RoutedIpv4Manager routedIpv4Manager; @Inject MessageBus _messageBus; @@ -393,6 +396,12 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom removeDomainWithNoAccountsForCleanupNetworksOrDedicatedResources(domain); } + // remove dedicated IPv4 subnets + routedIpv4Manager.removeIpv4SubnetsForZoneByDomainId(domain.getId()); + + // remove dedicated BGP peers + routedIpv4Manager.removeBgpPeersByDomainId(domain.getId()); + if (!_configMgr.releaseDomainSpecificVirtualRanges(domain.getId())) { CloudRuntimeException e = new CloudRuntimeException("Can't delete the domain yet because failed to release domain specific virtual ip ranges"); e.addProxyObject(domain.getUuid(), "domainId"); diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index b107a520205..f2a8a672d42 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -17,9 +17,12 @@ package com.cloud.vm; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import com.cloud.utils.StringUtils; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.framework.config.ConfigKey; @@ -38,6 +41,8 @@ import com.cloud.template.VirtualMachineTemplate; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; +import static 
com.cloud.user.ResourceLimitService.ResourceLimitHostTags; + /** * * @@ -46,12 +51,15 @@ public interface UserVmManager extends UserVmService { String EnableDynamicallyScaleVmCK = "enable.dynamic.scale.vm"; String AllowDiskOfferingChangeDuringScaleVmCK = "allow.diskoffering.change.during.scale.vm"; String AllowUserExpungeRecoverVmCK ="allow.user.expunge.recover.vm"; + String AllowUserForceStopVmCK = "allow.user.force.stop.vm"; ConfigKey EnableDynamicallyScaleVm = new ConfigKey("Advanced", Boolean.class, EnableDynamicallyScaleVmCK, "false", "Enables/Disables dynamically scaling a vm", true, ConfigKey.Scope.Zone); ConfigKey AllowDiskOfferingChangeDuringScaleVm = new ConfigKey("Advanced", Boolean.class, AllowDiskOfferingChangeDuringScaleVmCK, "false", "Determines whether to allow or disallow disk offering change for root volume during scaling of a stopped or running vm", true, ConfigKey.Scope.Zone); ConfigKey AllowUserExpungeRecoverVm = new ConfigKey("Advanced", Boolean.class, AllowUserExpungeRecoverVmCK, "false", "Determines whether users can expunge or recover their vm", true, ConfigKey.Scope.Account); + ConfigKey AllowUserForceStopVm = new ConfigKey("Advanced", Boolean.class, AllowUserForceStopVmCK, "true", + "Determines whether users are allowed to force stop a vm", true, ConfigKey.Scope.Account); ConfigKey DisplayVMOVFProperties = new ConfigKey("Advanced", Boolean.class, "vm.display.ovf.properties", "false", "Set display of VMs OVF properties as part of VM details", true); @@ -59,9 +67,26 @@ public interface UserVmManager extends UserVmService { "Destroys the VM's root volume when the VM is destroyed.", true, ConfigKey.Scope.Domain); + ConfigKey StrictHostTags = new ConfigKey<>( + "Advanced", + String.class, + "vm.strict.host.tags", + "", + "A comma-separated list of tags which must match during operations like modifying the compute" + + "offering for an instance, and starting or live migrating an instance to a specific host.", + true); + ConfigKey 
EnforceStrictResourceLimitHostTagCheck = new ConfigKey( + "Advanced", + Boolean.class, + "vm.strict.resource.limit.host.tag.check", + "true", + "If set to true, tags specified in `resource.limit.host.tags` are also included in vm.strict.host.tags.", + true); + static final int MAX_USER_DATA_LENGTH_BYTES = 2048; public static final String CKS_NODE = "cksnode"; + public static final String SHAREDFSVM = "sharedfsvm"; /** * @param hostId get all of the virtual machines that belong to one host. @@ -95,8 +120,6 @@ public interface UserVmManager extends UserVmService { String finalizeUserData(String userData, Long userDataId, VirtualMachineTemplate template); - String validateUserData(String userData, HTTPMethod httpmethod); - void validateExtraConfig(long accountId, HypervisorType hypervisorType, String extraConfig); boolean isVMUsingLocalStorage(VMInstanceVO vm); @@ -118,8 +141,14 @@ public interface UserVmManager extends UserVmService { boolean setupVmForPvlan(boolean add, Long hostId, NicProfile nic); - UserVm updateVirtualMachine(long id, String displayName, String group, Boolean ha, Boolean isDisplayVmEnabled, Long osTypeId, String userData, - Long userDataId, String userDataDetails, Boolean isDynamicallyScalable, HTTPMethod httpMethod, String customId, String hostName, String instanceName, List securityGroupIdList, Map> extraDhcpOptionsMap) throws ResourceUnavailableException, InsufficientCapacityException; + UserVm updateVirtualMachine(long id, String displayName, String group, Boolean ha, + Boolean isDisplayVmEnabled, Boolean deleteProtection, + Long osTypeId, String userData, Long userDataId, + String userDataDetails, Boolean isDynamicallyScalable, + HTTPMethod httpMethod, String customId, String hostName, + String instanceName, List securityGroupIdList, + Map> extraDhcpOptionsMap + ) throws ResourceUnavailableException, InsufficientCapacityException; //the validateCustomParameters, save and remove CustomOfferingDetils functions can be removed from the interface 
once we can //find a common place for all the scaling and upgrading code of both user and systemvms. @@ -127,6 +156,18 @@ public interface UserVmManager extends UserVmService { void generateUsageEvent(VirtualMachine vm, boolean isDisplay, String eventType); + static Set getStrictHostTags() { + String strictHostTags = StrictHostTags.value(); + Set strictHostTagsSet = new HashSet<>(); + if (StringUtils.isNotEmpty(strictHostTags)) { + strictHostTagsSet.addAll(List.of(strictHostTags.split(","))); + } + if (EnforceStrictResourceLimitHostTagCheck.value() && StringUtils.isNotEmpty(ResourceLimitHostTags.value())) { + strictHostTagsSet.addAll(List.of(ResourceLimitHostTags.value().split(","))); + } + return strictHostTagsSet; + } + void persistDeviceBusInfo(UserVmVO paramUserVmVO, String paramString); boolean checkIfDynamicScalingCanBeEnabled(VirtualMachine vm, ServiceOffering offering, VirtualMachineTemplate template, Long zoneId); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index a03aeac9967..c2bbd2a7f2e 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -16,7 +16,6 @@ // under the License. 
package com.cloud.vm; -import static com.cloud.configuration.ConfigurationManagerImpl.VM_USERDATA_MAX_LENGTH; import static com.cloud.storage.Volume.IOPS_LIMIT; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; import static org.apache.cloudstack.api.ApiConstants.MAX_IOPS; @@ -28,6 +27,7 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -54,11 +54,7 @@ import javax.naming.ConfigurationException; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.ParserConfigurationException; -import com.cloud.kubernetes.cluster.KubernetesClusterHelper; -import com.cloud.network.dao.NsxProviderDao; -import com.cloud.network.element.NsxProviderVO; -import com.cloud.user.AccountVO; -import com.cloud.utils.exception.ExceptionProxyObject; +import com.cloud.deploy.DeploymentPlan; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -71,9 +67,11 @@ import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.DeployVMCmdByAdmin; +import org.apache.cloudstack.api.command.admin.vm.ExpungeVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; @@ -136,9 +134,8 @@ import org.apache.cloudstack.storage.template.VnfTemplateManager; import 
org.apache.cloudstack.userdata.UserDataManager; import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; import org.apache.cloudstack.utils.security.ParserUtils; -import org.apache.cloudstack.vm.schedule.VMScheduleManager; import org.apache.cloudstack.vm.UnmanagedVMsManager; -import org.apache.commons.codec.binary.Base64; +import org.apache.cloudstack.vm.schedule.VMScheduleManager; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.math.NumberUtils; @@ -146,6 +143,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; import org.jetbrains.annotations.NotNull; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.w3c.dom.Document; @@ -246,6 +244,7 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.hypervisor.kvm.dpdk.DpdkHelper; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper; import com.cloud.network.IpAddressManager; import com.cloud.network.Network; import com.cloud.network.Network.GuestType; @@ -264,7 +263,9 @@ import com.cloud.network.dao.LoadBalancerVMMapVO; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkServiceMapDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.PhysicalNetworkDao; +import com.cloud.network.element.NsxProviderVO; import com.cloud.network.element.UserDataServiceProvider; import com.cloud.network.guru.NetworkGuru; import com.cloud.network.lb.LoadBalancingRulesManager; @@ -333,6 +334,7 @@ import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import 
com.cloud.user.AccountManager; import com.cloud.user.AccountService; +import com.cloud.user.AccountVO; import com.cloud.user.ResourceLimitService; import com.cloud.user.SSHKeyPairVO; import com.cloud.user.User; @@ -351,6 +353,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.Journal; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.crypt.DBEncryptionUtil; @@ -366,6 +369,7 @@ import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.UUIDManager; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExceptionProxyObject; import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.net.Ip; @@ -601,6 +605,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Inject NsxProviderDao nsxProviderDao; + private ScheduledExecutorService _executor = null; private ScheduledExecutorService _vmIpFetchExecutor = null; private int _expungeInterval; @@ -608,7 +613,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private boolean _dailyOrHourly = false; private int capacityReleaseInterval; private ExecutorService _vmIpFetchThreadExecutor; - private List kubernetesClusterHelpers; + private List kubernetesServiceHelpers; private String _instance; @@ -622,12 +627,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private static final int NUM_OF_2K_BLOCKS = 512; private static final int MAX_HTTP_POST_LENGTH = NUM_OF_2K_BLOCKS * MAX_USER_DATA_LENGTH_BYTES; - public List getKubernetesClusterHelpers() { - return kubernetesClusterHelpers; + public List getKubernetesServiceHelpers() { + return 
kubernetesServiceHelpers; } - public void setKubernetesClusterHelpers(final List kubernetesClusterHelpers) { - this.kubernetesClusterHelpers = kubernetesClusterHelpers; + public void setKubernetesServiceHelpers(final List kubernetesServiceHelpers) { + this.kubernetesServiceHelpers = kubernetesServiceHelpers; } @Inject @@ -953,6 +958,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (userVm == null) { throw new InvalidParameterValueException("unable to find a virtual machine by id" + cmd.getId()); } + if (UserVmManager.SHAREDFSVM.equals(userVm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } _accountMgr.checkAccess(caller, null, true, userVm); VMTemplateVO template = _templateDao.findByIdIncludingRemoved(userVm.getTemplateId()); @@ -997,6 +1005,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (userVm == null) { throw new InvalidParameterValueException("unable to find a virtual machine by id" + cmd.getId()); } + if (UserVmManager.SHAREDFSVM.equals(userVm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } VMTemplateVO template = _templateDao.findByIdIncludingRemoved(userVm.getTemplateId()); @@ -1436,6 +1447,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("unable to find a network with id " + networkId); } + if (UserVmManager.SHAREDFSVM.equals(vmInstance.getUserVmType()) && network.getGuestType() == Network.GuestType.Shared) { + if ((network.getAclType() != ControlledEntity.ACLType.Account) || + (network.getDomainId() != vmInstance.getDomainId()) || + (network.getAccountId() != vmInstance.getAccountId())) { + throw new InvalidParameterValueException("Shared network which is not Account scoped and not belonging to the same account can not be added to a Shared 
FileSystem Instance"); + } + } + Account vmOwner = _accountMgr.getAccount(vmInstance.getAccountId()); _networkModel.checkNetworkPermissions(vmOwner, network); @@ -1946,6 +1965,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException { VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId); + Account caller = CallContext.current().getCallingAccount(); _accountMgr.checkAccess(caller, null, true, vmInstance); if (vmInstance == null) { @@ -1964,9 +1984,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir HostVO instanceHost = _hostDao.findById(vmInstance.getHostId()); _hostDao.loadHostTags(instanceHost); - if (!instanceHost.checkHostServiceOfferingAndTemplateTags(newServiceOfferingVO, template)) { - logger.error(String.format("Cannot upgrade VM [%s] as the new service offering [%s] does not have the required host tags %s.", vmInstance, newServiceOfferingVO, - instanceHost.getHostTags())); + Set strictHostTags = UserVmManager.getStrictHostTags(); + if (!instanceHost.checkHostServiceOfferingAndTemplateTags(newServiceOfferingVO, template, strictHostTags)) { + logger.error("Cannot upgrade VM {} as the new service offering {} does not have the required host tags {}.", + vmInstance, newServiceOfferingVO, + instanceHost.getHostServiceOfferingAndTemplateMissingTags(newServiceOfferingVO, template, strictHostTags)); return false; } } @@ -2075,6 +2097,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Check disable threshold for cluster is not crossed HostVO host = _hostDao.findById(vmInstance.getHostId()); + _hostDao.loadDetails(host); if (_capacityMgr.checkIfClusterCrossesThreshold(host.getClusterId(), cpuDiff, memoryDiff)) { throw new CloudRuntimeException(String.format("Unable to scale %s due to insufficient resources.", vmInstance.toString())); } @@ -2087,12 +2110,14 @@ public class 
UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _resourceLimitMgr.updateVmResourceCountForServiceOfferingChange(caller.getAccountId(), vmInstance.isDisplay(), (long) currentCpu, (long) newCpu, (long) currentMemory, (long) newMemory, currentServiceOffering, newServiceOffering, template); - // #1 Check existing host has capacity + + // #1 Check existing host has capacity & and the correct tags if (!excludes.shouldAvoid(ApiDBUtils.findHostById(vmInstance.getHostId()))) { existingHostHasCapacity = _capacityMgr.checkIfHostHasCpuCapability(vmInstance.getHostId(), newCpu, newSpeed) && _capacityMgr.checkIfHostHasCapacity(vmInstance.getHostId(), cpuDiff, ByteScaleUtils.mebibytesToBytes(memoryDiff), false, _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_CPU), - _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_MEMORY), false); + _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_MEMORY), false) + && checkEnforceStrictHostTagCheck(vmInstance, host); excludes.addHost(vmInstance.getHostId()); } @@ -2134,12 +2159,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to Scale VM, since disk offering id associated with the old service offering is not same for new service offering"); } - DiskOfferingVO currentRootDiskOffering = _diskOfferingDao.findByIdIncludingRemoved(currentServiceOffering.getDiskOfferingId()); - DiskOfferingVO newRootDiskOffering = _diskOfferingDao.findById(newServiceOffering.getDiskOfferingId()); - - if (currentRootDiskOffering.getEncrypt() != newRootDiskOffering.getEncrypt()) { - throw new InvalidParameterValueException("Cannot change volume encryption type via service offering change"); - } + _volService.validateChangeDiskOfferingEncryptionType(currentServiceOffering.getDiskOfferingId(), newServiceOffering.getDiskOfferingId()); } private 
void changeDiskOfferingForRootVolume(Long vmId, DiskOfferingVO newDiskOffering, Map customParameters, Long zoneId) throws ResourceAllocationException { @@ -2272,6 +2292,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (vm == null) { throw new InvalidParameterValueException("unable to find a virtual machine with id " + vmId); } + if (UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } // When trying to expunge, permission is denied when the caller is not an admin and the AllowUserExpungeRecoverVm is false for the caller. if (!_accountMgr.isAdmin(userId) && !AllowUserExpungeRecoverVm.valueIn(userId)) { @@ -2561,7 +2584,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // cleanup port forwarding rules VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(vmId); NsxProviderVO nsx = nsxProviderDao.findByZoneId(vmInstanceVO.getDataCenterId()); - if (Objects.isNull(nsx) || Objects.isNull(kubernetesClusterHelpers.get(0).findByVmId(vmId))) { + if (Objects.isNull(nsx) || Objects.isNull(kubernetesServiceHelpers.get(0).findByVmId(vmId))) { if (_rulesMgr.revokePortForwardingRulesForVm(vmId)) { logger.debug("Port forwarding rules are removed successfully as a part of vm id=" + vmId + " expunge"); } else { @@ -2807,6 +2830,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new CloudRuntimeException("Detail settings are read from OVA, it cannot be changed by API call."); } } + UserVmVO userVm = _vmDao.findById(cmd.getId()); + if (userVm != null && UserVmManager.SHAREDFSVM.equals(userVm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } + String userData = cmd.getUserData(); Long userDataId = cmd.getUserdataId(); String userDataDetails = null; @@ -2814,6 +2842,7 @@ public class 
UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir userDataDetails = cmd.getUserdataDetails().toString(); } userData = finalizeUserData(userData, userDataId, template); + userData = userDataManager.validateUserData(userData, cmd.getHttpMethod()); long accountId = vmInstance.getAccountId(); @@ -2893,8 +2922,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } } - return updateVirtualMachine(id, displayName, group, ha, isDisplayVm, osTypeId, userData, userDataId, userDataDetails, isDynamicallyScalable, - cmd.getHttpMethod(), cmd.getCustomId(), hostName, cmd.getInstanceName(), securityGroupIdList, cmd.getDhcpOptionsMap()); + return updateVirtualMachine(id, displayName, group, ha, isDisplayVm, + cmd.getDeleteProtection(), osTypeId, userData, + userDataId, userDataDetails, isDynamicallyScalable, cmd.getHttpMethod(), + cmd.getCustomId(), hostName, cmd.getInstanceName(), securityGroupIdList, + cmd.getDhcpOptionsMap()); } private boolean isExtraConfig(String detailName) { @@ -2995,9 +3027,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } @Override - public UserVm updateVirtualMachine(long id, String displayName, String group, Boolean ha, Boolean isDisplayVmEnabled, Long osTypeId, String userData, - Long userDataId, String userDataDetails, Boolean isDynamicallyScalable, HTTPMethod httpMethod, String customId, String hostName, String instanceName, List securityGroupIdList, Map> extraDhcpOptionsMap) - throws ResourceUnavailableException, InsufficientCapacityException { + public UserVm updateVirtualMachine(long id, String displayName, String group, Boolean ha, + Boolean isDisplayVmEnabled, Boolean deleteProtection, + Long osTypeId, String userData, Long userDataId, + String userDataDetails, Boolean isDynamicallyScalable, + HTTPMethod httpMethod, String customId, String hostName, + String instanceName, List securityGroupIdList, + Map> extraDhcpOptionsMap + ) throws 
ResourceUnavailableException, InsufficientCapacityException { UserVmVO vm = _vmDao.findById(id); if (vm == null) { throw new CloudRuntimeException("Unable to find virtual machine with id " + id); @@ -3032,6 +3069,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir isDisplayVmEnabled = vm.isDisplayVm(); } + if (deleteProtection == null) { + deleteProtection = vm.isDeleteProtection(); + } + boolean updateUserdata = false; if (userData != null) { // check and replace newlines @@ -3086,11 +3127,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Network defaultNetwork = null; try { DataCenterVO zone = _dcDao.findById(vm.getDataCenterId()); - if (zone.getNetworkType() == NetworkType.Basic) { // Get default guest network in Basic zone defaultNetwork = _networkModel.getExclusiveGuestNetwork(zone.getId()); - } else if (zone.isSecurityGroupEnabled()) { + } else if (_networkModel.checkSecurityGroupSupportForNetwork(_accountMgr.getActiveAccountById(vm.getAccountId()), zone, Collections.emptyList(), securityGroupIdList)) { NicVO defaultNic = _nicDao.findDefaultNicForVM(vm.getId()); if (defaultNic != null) { defaultNetwork = _networkDao.findById(defaultNic.getNetworkId()); @@ -3146,7 +3186,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir .getUuid(), nic.getId(), extraDhcpOptionsMap); } - _vmDao.updateVM(id, displayName, ha, osTypeId, userData, userDataId, userDataDetails, isDisplayVmEnabled, isDynamicallyScalable, customId, hostName, instanceName); + _vmDao.updateVM(id, displayName, ha, osTypeId, userData, userDataId, + userDataDetails, isDisplayVmEnabled, isDynamicallyScalable, + deleteProtection, customId, hostName, instanceName); if (updateUserdata) { updateUserData(vm); @@ -3258,6 +3300,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return startVirtualMachine(cmd.getId(), cmd.getPodId(), cmd.getClusterId(), cmd.getHostId(), 
additonalParams, cmd.getDeploymentPlanner()).first(); } + @Override + @ActionEvent(eventType = EventTypes.EVENT_VM_START, eventDescription = "starting Vm", async = true) + public void startVirtualMachine(UserVm vm, DeploymentPlan plan) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException { + _itMgr.advanceStart(vm.getUuid(), null, plan, null); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_VM_START, eventDescription = "restarting VM for HA", async = true) + public void startVirtualMachineForHA(VirtualMachine vm, Map params, + DeploymentPlanner planner) throws InsufficientCapacityException, ResourceUnavailableException, + ConcurrentOperationException, OperationTimedoutException { + _itMgr.advanceStart(vm.getUuid(), params, planner); + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_REBOOT, eventDescription = "rebooting Vm", async = true) public UserVm rebootVirtualMachine(RebootVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ResourceAllocationException { @@ -3312,6 +3368,37 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return null; } + /** + * Encapsulates AllowUserExpungeRecoverVm so we can unit test checkExpungeVmPermission. 
+ */ + protected boolean getConfigAllowUserExpungeRecoverVm(Long accountId) { + return AllowUserExpungeRecoverVm.valueIn(accountId); + } + + protected void checkExpungeVmPermission (Account callingAccount) { + logger.debug(String.format("Checking if [%s] has permission for expunging VMs.", callingAccount)); + if (!_accountMgr.isAdmin(callingAccount.getId()) && !getConfigAllowUserExpungeRecoverVm(callingAccount.getId())) { + logger.error(String.format("Parameter [%s] can only be passed by Admin accounts or when the allow.user.expunge.recover.vm key is true.", ApiConstants.EXPUNGE)); + throw new PermissionDeniedException("Account does not have permission for expunging."); + } + try { + _accountMgr.checkApiAccess(callingAccount, BaseCmd.getCommandNameByClass(ExpungeVMCmd.class)); + } catch (PermissionDeniedException ex) { + logger.error(String.format("Role [%s] of [%s] does not have permission for expunging VMs.", callingAccount.getRoleId(), callingAccount)); + throw new PermissionDeniedException("Account does not have permission for expunging."); + } + } + + protected void checkPluginsIfVmCanBeDestroyed(UserVm vm) { + try { + KubernetesServiceHelper kubernetesServiceHelper = + ComponentContext.getDelegateComponentOfType(KubernetesServiceHelper.class); + kubernetesServiceHelper.checkVmCanBeDestroyed(vm); + } catch (NoSuchBeanDefinitionException ignored) { + logger.debug("No KubernetesClusterHelper bean found"); + } + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_DESTROY, eventDescription = "destroying Vm", async = true) public UserVm destroyVm(DestroyVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException { @@ -3319,25 +3406,37 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir long vmId = cmd.getId(); boolean expunge = cmd.getExpunge(); - // When trying to expunge, permission is denied when the caller is not an admin and the AllowUserExpungeRecoverVm is false for the caller. 
- if (expunge && !_accountMgr.isAdmin(ctx.getCallingAccount().getId()) && !AllowUserExpungeRecoverVm.valueIn(cmd.getEntityOwnerId())) { - throw new PermissionDeniedException("Parameter " + ApiConstants.EXPUNGE + " can be passed by Admin only. Or when the allow.user.expunge.recover.vm key is set."); + if (expunge) { + checkExpungeVmPermission(ctx.getCallingAccount()); } + // check if VM exists UserVmVO vm = _vmDao.findById(vmId); if (vm == null || vm.getRemoved() != null) { throw new InvalidParameterValueException("unable to find a virtual machine with id " + vmId); } + if (UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } if (Arrays.asList(State.Destroyed, State.Expunging).contains(vm.getState()) && !expunge) { logger.debug("Vm id=" + vmId + " is already destroyed"); return vm; } + if (vm.isDeleteProtection()) { + throw new InvalidParameterValueException(String.format( + "Instance [id = %s, name = %s] has delete protection enabled and cannot be deleted.", + vm.getUuid(), vm.getName())); + } + // check if vm belongs to AutoScale vm group in Disabled state autoScaleManager.checkIfVmActionAllowed(vmId); + // check if vm belongs to any plugin resources + checkPluginsIfVmCanBeDestroyed(vm); + // check if there are active volume snapshots tasks logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); if (checkStatusOfVolumeSnapshots(vmId, Volume.Type.ROOT)) { @@ -3660,7 +3759,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // If no network is specified, find system security group enabled network if (networkIdList == null || networkIdList.isEmpty()) { - Network networkWithSecurityGroup = _networkModel.getNetworkWithSGWithFreeIPs(zone.getId()); + Network networkWithSecurityGroup = _networkModel.getNetworkWithSGWithFreeIPs(owner, zone.getId()); if 
(networkWithSecurityGroup == null) { throw new InvalidParameterValueException("No network with security enabled is found in zone id=" + zone.getUuid()); } @@ -3767,7 +3866,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // Verify that owner can use the service offering _accountMgr.checkAccess(owner, serviceOffering, zone); - _accountMgr.checkAccess(owner, _diskOfferingDao.findById(diskOfferingId), zone); + + DiskOffering diskOffering =_diskOfferingDao.findById(diskOfferingId); + _accountMgr.checkAccess(owner, diskOffering, zone); List vpcSupportedHTypes = _vpcMgr.getSupportedVpcHypervisors(); if (networkIdList == null || networkIdList.isEmpty()) { @@ -3901,7 +4002,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true, null, null, - null, null, null, null, null, null, null, null); + null, null, null, null, null, null, null, null, null); if (newNetwork != null) { defaultNetwork = _networkDao.findById(newNetwork.getId()); } @@ -4184,7 +4285,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - if (template.getTemplateType().equals(TemplateType.SYSTEM) && !CKS_NODE.equals(vmType)) { + if (template.getTemplateType().equals(TemplateType.SYSTEM) && !CKS_NODE.equals(vmType) && !SHAREDFSVM.equals(vmType)) { throw new InvalidParameterValueException("Unable to use system template " + template.getId() + " to deploy a user vm"); } List listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId()); @@ -4724,17 
+4825,24 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir vm.setDetail(VmDetailConstants.DATA_DISK_CONTROLLER, dataDiskControllerSetting); } - String controllerSetting = StringUtils.defaultIfEmpty(_configDao.getValue(Config.VmwareRootDiskControllerType.key()), - Config.VmwareRootDiskControllerType.getDefaultValue()); - // Don't override if VM already has root/data disk controller detail if (vm.getDetail(VmDetailConstants.ROOT_DISK_CONTROLLER) == null) { - vm.setDetail(VmDetailConstants.ROOT_DISK_CONTROLLER, controllerSetting); + String vmwareRootDiskControllerTypeFromSetting = StringUtils.defaultIfEmpty(_configDao.getValue(Config.VmwareRootDiskControllerType.key()), + Config.VmwareRootDiskControllerType.getDefaultValue()); + vm.setDetail(VmDetailConstants.ROOT_DISK_CONTROLLER, vmwareRootDiskControllerTypeFromSetting); } + if (vm.getDetail(VmDetailConstants.DATA_DISK_CONTROLLER) == null) { - if (controllerSetting.equalsIgnoreCase("scsi")) { - vm.setDetail(VmDetailConstants.DATA_DISK_CONTROLLER, "scsi"); + String finalRootDiskController = vm.getDetail(VmDetailConstants.ROOT_DISK_CONTROLLER); + // Set the data disk controller detail same as the final scsi root disk controller if VM doesn't have data disk controller detail + // This is to ensure the disk controller is available for the data disks, as all the SCSI controllers are created with same controller type + String scsiControllerPattern = "(?i)\\b(scsi|lsilogic|lsilogicsas|lsisas1068|buslogic|pvscsi)\\b"; + if (finalRootDiskController.matches(scsiControllerPattern)) { + logger.info(String.format("Data disk controller was not defined, but root disk is using SCSI controller [%s]." 
+ + "To ensure disk controllers are available for the data disks, the data disk controller is updated to match the root disk controller.", finalRootDiskController)); + vm.setDetail(VmDetailConstants.DATA_DISK_CONTROLLER, finalRootDiskController); } else { + logger.info("Data disk controller was not defined; defaulting to 'osdefault'."); vm.setDetail(VmDetailConstants.DATA_DISK_CONTROLLER, "osdefault"); } } @@ -4942,56 +5050,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - @Override - public String validateUserData(String userData, HTTPMethod httpmethod) { - byte[] decodedUserData = null; - if (userData != null) { - - if (userData.contains("%")) { - try { - userData = URLDecoder.decode(userData, "UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new InvalidParameterValueException("Url decoding of userdata failed."); - } - } - - if (!Base64.isBase64(userData)) { - throw new InvalidParameterValueException("User data is not base64 encoded"); - } - // If GET, use 4K. If POST, support up to 1M. 
- if (httpmethod.equals(HTTPMethod.GET)) { - if (userData.length() >= MAX_HTTP_GET_LENGTH) { - throw new InvalidParameterValueException("User data is too long for an http GET request"); - } - if (userData.length() > VM_USERDATA_MAX_LENGTH.value()) { - throw new InvalidParameterValueException("User data has exceeded configurable max length : " + VM_USERDATA_MAX_LENGTH.value()); - } - decodedUserData = Base64.decodeBase64(userData.getBytes()); - if (decodedUserData.length > MAX_HTTP_GET_LENGTH) { - throw new InvalidParameterValueException("User data is too long for GET request"); - } - } else if (httpmethod.equals(HTTPMethod.POST)) { - if (userData.length() >= MAX_HTTP_POST_LENGTH) { - throw new InvalidParameterValueException("User data is too long for an http POST request"); - } - if (userData.length() > VM_USERDATA_MAX_LENGTH.value()) { - throw new InvalidParameterValueException("User data has exceeded configurable max length : " + VM_USERDATA_MAX_LENGTH.value()); - } - decodedUserData = Base64.decodeBase64(userData.getBytes()); - if (decodedUserData.length > MAX_HTTP_POST_LENGTH) { - throw new InvalidParameterValueException("User data is too long for POST request"); - } - } - - if (decodedUserData == null || decodedUserData.length < 1) { - throw new InvalidParameterValueException("User data is too short"); - } - // Re-encode so that the '=' paddings are added if necessary since 'isBase64' does not require it, but python does on the VR. 
- return Base64.encodeBase64String(decodedUserData); - } - return null; - } - @Override @ActionEvent(eventType = EventTypes.EVENT_VM_CREATE, eventDescription = "deploying Vm", async = true) public UserVm startVirtualMachine(DeployVMCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException { @@ -5074,10 +5132,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } private void addUserVMCmdlineArgs(Long vmId, VirtualMachineProfile profile, DeployDestination dest, StringBuilder buf) { - UserVmVO k8sVM = _vmDao.findById(vmId); + UserVmVO vm = _vmDao.findById(vmId); buf.append(" template=domP"); buf.append(" name=").append(profile.getHostName()); - buf.append(" type=").append(k8sVM.getUserVmType()); + buf.append(" type=").append(vm.getUserVmType()); for (NicProfile nic : profile.getNics()) { int deviceId = nic.getDeviceId(); if (nic.getIPv4Address() == null) { @@ -5118,7 +5176,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Map details = userVmDetailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); StringBuilder buf = profile.getBootArgsBuilder(); - if (CKS_NODE.equals(vm.getUserVmType())) { + if (CKS_NODE.equals(vm.getUserVmType()) || SHAREDFSVM.equals(vm.getUserVmType())) { addUserVMCmdlineArgs(vm.getId(), profile, dest, buf); } // add userdata info into vm profile @@ -5339,6 +5397,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir public void finalizeExpunge(VirtualMachine vm) { } + private void checkForceStopVmPermission(Account callingAccount) { + if (!AllowUserForceStopVm.valueIn(callingAccount.getId())) { + logger.error("Parameter [{}] can only be passed by Admin accounts or when the allow.user.force.stop.vm config is true for the account.", ApiConstants.FORCED); + throw new PermissionDeniedException("Account does not have the permission to force stop the vm."); + } + } 
+ @Override @ActionEvent(eventType = EventTypes.EVENT_VM_STOP, eventDescription = "stopping Vm", async = true) public UserVm stopVirtualMachine(long vmId, boolean forced) throws ConcurrentOperationException { @@ -5356,6 +5421,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("unable to find a virtual machine with id " + vmId); } + if (forced) { + checkForceStopVmPermission(caller); + } + // check if vm belongs to AutoScale vm group in Disabled state autoScaleManager.checkIfVmActionAllowed(vmId); @@ -5484,15 +5553,23 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir boolean isRootAdmin = _accountService.isRootAdmin(callerAccount.getId()); Pod destinationPod = getDestinationPod(podId, isRootAdmin); Cluster destinationCluster = getDestinationCluster(clusterId, isRootAdmin); - Host destinationHost = getDestinationHost(hostId, isRootAdmin, isExplicitHost); + HostVO destinationHost = getDestinationHost(hostId, isRootAdmin, isExplicitHost); DataCenterDeployment plan = null; boolean deployOnGivenHost = false; if (destinationHost != null) { logger.debug("Destination Host to deploy the VM is specified, specifying a deployment plan to deploy the VM"); + _hostDao.loadHostTags(destinationHost); + validateStrictHostTagCheck(vm, destinationHost); + final ServiceOfferingVO offering = serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId()); Pair cpuCapabilityAndCapacity = _capacityMgr.checkIfHostHasCpuCapabilityAndCapacity(destinationHost, offering, false); if (!cpuCapabilityAndCapacity.first() || !cpuCapabilityAndCapacity.second()) { - String errorMsg = "Cannot deploy the VM to specified host " + hostId + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity? 
" + cpuCapabilityAndCapacity.second(); + String errorMsg; + if (!cpuCapabilityAndCapacity.first()) { + errorMsg = String.format("Cannot deploy the VM to specified host %d, requested CPU and speed is more than the host capability", hostId); + } else { + errorMsg = String.format("Cannot deploy the VM to specified host %d, host does not have enough free CPU or RAM, please check the logs", hostId); + } logger.info(errorMsg); if (!AllowDeployVmIfGivenHostFails.value()) { throw new InvalidParameterValueException(errorMsg); @@ -5659,8 +5736,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir return destinationCluster; } - private Host getDestinationHost(Long hostId, boolean isRootAdmin, boolean isExplicitHost) { - Host destinationHost = null; + private HostVO getDestinationHost(Long hostId, boolean isRootAdmin, boolean isExplicitHost) { + HostVO destinationHost = null; if (hostId != null) { if (isExplicitHost && !isRootAdmin) { throw new PermissionDeniedException( @@ -5880,6 +5957,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir ex.addProxyObject(String.valueOf(vmId), "vmId"); throw ex; } + if (UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } if (vm.getRemoved() != null) { logger.trace("Vm id=" + vmId + " is already expunged"); @@ -6085,13 +6165,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } String userData = cmd.getUserData(); - userData = userDataManager.validateUserData(userData, cmd.getHttpMethod()); Long userDataId = cmd.getUserdataId(); String userDataDetails = null; if (MapUtils.isNotEmpty(cmd.getUserdataDetails())) { userDataDetails = cmd.getUserdataDetails().toString(); } userData = finalizeUserData(userData, userDataId, template); + userData = userDataManager.validateUserData(userData, cmd.getHttpMethod()); Account caller = 
CallContext.current().getCallingAccount(); Long callerId = caller.getId(); @@ -6125,7 +6205,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, overrideDiskOfferingId); } } else { - if (zone.isSecurityGroupEnabled()) { + if (_networkModel.checkSecurityGroupSupportForNetwork(owner, zone, networkIds, + cmd.getSecurityGroupIdList())) { vm = createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, networkIds, getSecurityGroupIdList(cmd, zone, template, owner), owner, name, displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, userDataId, userDataDetails, sshKeyPairNames, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), @@ -6531,6 +6612,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir + " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS)); } + List vols = _volsDao.findByInstance(vm.getId()); + if (vols.size() > 1 && + !(HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType))) { + throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. 
Need to detach data disks first"); + } + // Check that Vm does not have VM Snapshots if (_vmSnapshotDao.findByVm(vmId).size() > 0) { throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM"); @@ -6702,6 +6789,31 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } + protected boolean checkEnforceStrictHostTagCheck(VMInstanceVO vm, HostVO host) { + ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); + VirtualMachineTemplate template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId()); + return checkEnforceStrictHostTagCheck(host, serviceOffering, template); + } + + private boolean checkEnforceStrictHostTagCheck(HostVO host, ServiceOffering serviceOffering, VirtualMachineTemplate template) { + Set strictHostTags = UserVmManager.getStrictHostTags(); + return host.checkHostServiceOfferingAndTemplateTags(serviceOffering, template, strictHostTags); + } + + protected void validateStrictHostTagCheck(VMInstanceVO vm, HostVO host) { + ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); + VirtualMachineTemplate template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId()); + + if (!checkEnforceStrictHostTagCheck(host, serviceOffering, template)) { + Set missingTags = host.getHostServiceOfferingAndTemplateMissingTags(serviceOffering, template, UserVmManager.getStrictHostTags()); + logger.error("Cannot deploy VM: {} to host : {} due to tag mismatch. 
host tags: {}, " + + "strict host tags: {} serviceOffering tags: {}, template tags: {}, missing tags: {}", + vm, host, host.getHostTags(), UserVmManager.getStrictHostTags(), serviceOffering.getHostTag(), template.getTemplateTag(), missingTags); + throw new InvalidParameterValueException(String.format("Cannot deploy VM, destination host: %s " + + "is not compatible for the VM", host.getName())); + } + } + private DeployDestination checkVmMigrationDestination(VMInstanceVO vm, Host srcHost, Host destinationHost) throws VirtualMachineMigrationException { if (destinationHost == null) { return null; @@ -6727,6 +6839,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new CloudRuntimeException("Cannot migrate VM, VM is DPDK enabled VM but destination host is not DPDK enabled"); } + HostVO destinationHostVO = _hostDao.findById(destinationHost.getId()); + _hostDao.loadHostTags(destinationHostVO); + validateStrictHostTagCheck(vm, destinationHostVO); + checkHostsDedication(vm, srcHost.getId(), destinationHost.getId()); // call to core process @@ -6736,7 +6852,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir DeployDestination dest = new DeployDestination(dcVO, pod, cluster, destinationHost); // check max guest vm limit for the destinationHost - HostVO destinationHostVO = _hostDao.findById(destinationHost.getId()); if (_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)) { if (logger.isDebugEnabled()) { logger.debug("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId() @@ -7304,6 +7419,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } + if (UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } final Account oldAccount = 
_accountService.getActiveAccountById(vm.getAccountId()); if (oldAccount == null) { @@ -7384,10 +7502,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (template == null) { throw new InvalidParameterValueException(String.format("Template for VM: %s cannot be found", vm.getUuid())); } - if (!template.isPublicTemplate()) { - Account templateOwner = _accountMgr.getAccount(template.getAccountId()); - _accountMgr.checkAccess(newAccount, null, true, templateOwner); - } + _accountMgr.checkAccess(newAccount, AccessType.UseEntry, true, template); // VV 5: check the new account can create vm in the domain DomainVO domain = _domainDao.findById(cmd.getDomainId()); @@ -7514,7 +7629,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Set applicableNetworks = new LinkedHashSet<>(); Map requestedIPv4ForNics = new HashMap<>(); Map requestedIPv6ForNics = new HashMap<>(); - if (zone.isSecurityGroupEnabled()) { // advanced zone with security groups + if (_networkModel.checkSecurityGroupSupportForNetwork(newAccount, zone, networkIdList, securityGroupIdList)) { // advanced zone with security groups // cleanup the old security groups _securityGroupMgr.removeInstanceFromGroups(cmd.getVmId()); // if networkIdList is null and the first network of vm is shared network, then keep it if possible @@ -7715,7 +7830,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), newAccount.getAccountName() + "-network", newAccount.getAccountName() + "-network", null, null, null, false, null, newAccount, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, - null, null, true, null, null, null, null, null, null, null, null, null, null); + null, null, true, null, null, null, null, null, null, null, null, null, null, null); // if the network offering has persistent set to true, implement the network if 
(requiredOfferings.get(0).isPersistent()) { DeployDestination dest = new DeployDestination(zone, null, null, null); @@ -7827,6 +7942,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir ex.addProxyObject(String.valueOf(vmId), "vmId"); throw ex; } + if (UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } _accountMgr.checkAccess(caller, null, true, vm); VMTemplateVO template; @@ -8080,7 +8198,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (needRestart) { try { - if (vm.getDetail(VmDetailConstants.PASSWORD) != null) { + if (Objects.nonNull(password)) { params = new HashMap<>(); params.put(VirtualMachineProfile.Param.VmPassword, password); } @@ -8418,7 +8536,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir public ConfigKey[] getConfigKeys() { return new ConfigKey[] {EnableDynamicallyScaleVm, AllowDiskOfferingChangeDuringScaleVm, AllowUserExpungeRecoverVm, VmIpFetchWaitInterval, VmIpFetchTrialMax, VmIpFetchThreadPoolMax, VmIpFetchTaskWorkers, AllowDeployVmIfGivenHostFails, EnableAdditionalVmConfig, DisplayVMOVFProperties, - KvmAdditionalConfigAllowList, XenServerAdditionalConfigAllowList, VmwareAdditionalConfigAllowList, DestroyRootVolumeOnVmDestruction}; + KvmAdditionalConfigAllowList, XenServerAdditionalConfigAllowList, VmwareAdditionalConfigAllowList, DestroyRootVolumeOnVmDestruction, + EnforceStrictResourceLimitHostTagCheck, StrictHostTags, AllowUserForceStopVm}; } @Override @@ -8487,6 +8606,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (!(volume.getVolumeType() == Volume.Type.ROOT || volume.getVolumeType() == Volume.Type.DATADISK)) { throw new InvalidParameterValueException("Please specify volume of type " + Volume.Type.DATADISK.toString() + " or " + Volume.Type.ROOT.toString()); } + if 
(volume.isDeleteProtection()) { + throw new InvalidParameterValueException(String.format( + "Volume [id = %s, name = %s] has delete protection enabled and cannot be deleted", + volume.getUuid(), volume.getName())); + } } } @@ -8661,10 +8785,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_STOP, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), vm.getHostName(), vm.getServiceOfferingId(), vm.getTemplateId(), vm.getHypervisorType().toString(), VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplayVm()); + resourceCountDecrement(vm.getAccountId(), vm.isDisplayVm(), offering, template); resourceNotDecremented = false; } - // VM destroy usage event UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_DESTROY, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), vm.getHostName(), vm.getServiceOfferingId(), vm.getTemplateId(), @@ -8734,8 +8858,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private Network getNetworkForOvfNetworkMapping(DataCenter zone, Account owner) throws InsufficientCapacityException, ResourceAllocationException { Network network = null; - if (zone.isSecurityGroupEnabled()) { - network = _networkModel.getNetworkWithSGWithFreeIPs(zone.getId()); + if (zone.isSecurityGroupEnabled() || _networkModel.isSecurityGroupSupportedForZone(zone.getId())) { + network = _networkModel.getNetworkWithSGWithFreeIPs(owner, zone.getId()); if (network == null) { throw new InvalidParameterValueException("No network with security enabled is found in zone ID: " + zone.getUuid()); } diff --git a/server/src/main/java/com/cloud/vm/UserVmStateListener.java b/server/src/main/java/com/cloud/vm/UserVmStateListener.java index 6fc815dc10b..aa1805d3366 100644 --- a/server/src/main/java/com/cloud/vm/UserVmStateListener.java +++ b/server/src/main/java/com/cloud/vm/UserVmStateListener.java @@ -24,15 +24,11 @@ import java.util.Map; import 
javax.inject.Inject; -import com.cloud.server.ManagementService; -import com.cloud.utils.fsm.StateMachine2; -import com.cloud.vm.dao.UserVmDao; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.springframework.beans.factory.NoSuchBeanDefinitionException; - import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.events.EventBus; +import org.apache.cloudstack.framework.events.EventDistributor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import com.cloud.configuration.Config; import com.cloud.event.EventCategory; @@ -41,12 +37,15 @@ import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.UsageEventDao; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.server.ManagementService; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.fsm.StateListener; +import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.VirtualMachine.Event; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.UserVmDao; public class UserVmStateListener implements StateListener { @@ -57,10 +56,9 @@ public class UserVmStateListener implements StateListener eventDescription = new HashMap(); + Map eventDescription = new HashMap<>(); eventDescription.put("resource", resourceName); eventDescription.put("id", vo.getUuid()); eventDescription.put("old-state", oldState.name()); @@ -150,12 +148,7 @@ public class UserVmStateListener implements StateListener() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + RoleVO roleVO = roleDao.findById(role.getId()); + roleVO.setState(state); + return roleDao.update(role.getId(), roleVO); + } + }); + } + + @Override + @ActionEvent(eventType = 
EventTypes.EVENT_ROLE_ENABLE, eventDescription = "enabling Role") + public boolean enableRole(Role role) { + return updateRoleState(role, Role.State.ENABLED); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ROLE_DISABLE, eventDescription = "disabling Role") + public boolean disableRole(Role role) { + return updateRoleState(role, Role.State.DISABLED); + } + @Override @ActionEvent(eventType = EventTypes.EVENT_ROLE_PERMISSION_CREATE, eventDescription = "creating Role Permission") public RolePermission createRolePermission(final Role role, final Rule rule, final Permission permission, final String description) { @@ -401,13 +433,13 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu @Override public List findRolesByName(String name) { - return findRolesByName(name, null, null, null).first(); + return findRolesByName(name, null, null, null, null).first(); } @Override - public Pair, Integer> findRolesByName(String name, String keyword, Long startIndex, Long limit) { + public Pair, Integer> findRolesByName(String name, String keyword, String state, Long startIndex, Long limit) { if (StringUtils.isNotBlank(name) || StringUtils.isNotBlank(keyword)) { - Pair, Integer> data = roleDao.findAllByName(name, keyword, startIndex, limit, isCallerRootAdmin()); + Pair, Integer> data = roleDao.findAllByName(name, keyword, state, startIndex, limit, isCallerRootAdmin()); int removed = removeRolesIfNeeded(data.first()); return new Pair,Integer>(ListUtils.toListOfInterface(data.first()), Integer.valueOf(data.second() - removed)); } @@ -504,15 +536,15 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu @Override public List findRolesByType(RoleType roleType) { - return findRolesByType(roleType, null, null).first(); + return findRolesByType(roleType, null, null, null).first(); } @Override - public Pair, Integer> findRolesByType(RoleType roleType, Long startIndex, Long limit) { + public Pair, Integer> 
findRolesByType(RoleType roleType, String state, Long startIndex, Long limit) { if (roleType == null || RoleType.Admin == roleType && !isCallerRootAdmin()) { return new Pair, Integer>(Collections.emptyList(), 0); } - Pair, Integer> data = roleDao.findAllByRoleType(roleType, startIndex, limit, isCallerRootAdmin()); + Pair, Integer> data = roleDao.findAllByRoleType(roleType, state, startIndex, limit, isCallerRootAdmin()); return new Pair,Integer>(ListUtils.toListOfInterface(data.first()), Integer.valueOf(data.second())); } @@ -524,8 +556,8 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu } @Override - public Pair, Integer> listRoles(Long startIndex, Long limit) { - Pair, Integer> data = roleDao.listAllRoles(startIndex, limit, isCallerRootAdmin()); + public Pair, Integer> listRoles(String state, Long startIndex, Long limit) { + Pair, Integer> data = roleDao.listAllRoles(state, startIndex, limit, isCallerRootAdmin()); int removed = removeRolesIfNeeded(data.first()); return new Pair,Integer>(ListUtils.toListOfInterface(data.first()), Integer.valueOf(data.second() - removed)); } @@ -577,6 +609,8 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configu cmdList.add(ListRolePermissionsCmd.class); cmdList.add(UpdateRolePermissionCmd.class); cmdList.add(DeleteRolePermissionCmd.class); + cmdList.add(EnableRoleCmd.class); + cmdList.add(DisableRoleCmd.class); return cmdList; } } diff --git a/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java b/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java index 05f8c372826..0ec16f1e748 100644 --- a/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java @@ -60,6 +60,7 @@ import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.TransactionStatus; import 
com.cloud.utils.fsm.StateListener; import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Event; @@ -430,6 +431,9 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro if (vmInstance == null) { throw new InvalidParameterValueException("Unable to find a virtual machine with id " + vmId); } + if (UserVmManager.SHAREDFSVM.equals(vmInstance.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } // Check that the VM is stopped if (!vmInstance.getState().equals(State.Stopped)) { diff --git a/server/src/main/java/org/apache/cloudstack/annotation/AnnotationManagerImpl.java b/server/src/main/java/org/apache/cloudstack/annotation/AnnotationManagerImpl.java index 6975ecbef66..a5f7a1b8002 100644 --- a/server/src/main/java/org/apache/cloudstack/annotation/AnnotationManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/annotation/AnnotationManagerImpl.java @@ -64,7 +64,7 @@ import com.cloud.event.EventTypes; import com.cloud.exception.PermissionDeniedException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; -import com.cloud.kubernetes.cluster.KubernetesClusterHelper; +import com.cloud.kubernetes.cluster.KubernetesServiceHelper; import com.cloud.network.as.dao.AutoScaleVmGroupDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.NetworkDao; @@ -163,7 +163,7 @@ public final class AnnotationManagerImpl extends ManagerBase implements Annotati EntityManager entityManager; private static final List adminRoles = Collections.singletonList(RoleType.Admin); - private List kubernetesClusterHelpers; + private List kubernetesServiceHelpers; public static final Map s_typeMap = new HashMap<>(); static { @@ -198,12 +198,12 @@ public final class AnnotationManagerImpl extends ManagerBase implements Annotati 
s_typeMap.put(EntityType.OBJECT_STORAGE, ApiCommandResourceType.ObjectStore); } - public List getKubernetesClusterHelpers() { - return kubernetesClusterHelpers; + public List getKubernetesServiceHelpers() { + return kubernetesServiceHelpers; } - public void setKubernetesClusterHelpers(final List kubernetesClusterHelpers) { - this.kubernetesClusterHelpers = kubernetesClusterHelpers; + public void setKubernetesServiceHelpers(final List kubernetesServiceHelpers) { + this.kubernetesServiceHelpers = kubernetesServiceHelpers; } @Override @@ -533,7 +533,7 @@ public final class AnnotationManagerImpl extends ManagerBase implements Annotati case ISO: return templateDao.findByUuid(entityUuid); case KUBERNETES_CLUSTER: - return kubernetesClusterHelpers.get(0).findByUuid(entityUuid); + return kubernetesServiceHelpers.get(0).findByUuid(entityUuid); case AUTOSCALE_VM_GROUP: return autoScaleVmGroupDao.findByUuid(entityUuid); case MANAGEMENT_SERVER: diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index 87535978e55..6e13ba135df 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -26,7 +26,9 @@ import java.util.Map; import java.util.TimeZone; import java.util.Timer; import java.util.TimerTask; +import java.util.stream.Collectors; +import com.amazonaws.util.CollectionUtils; import com.cloud.storage.VolumeApiService; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.vm.VirtualMachineManager; @@ -52,6 +54,9 @@ import org.apache.cloudstack.api.command.user.backup.RemoveVirtualMachineFromBac import org.apache.cloudstack.api.command.user.backup.RestoreBackupCmd; import org.apache.cloudstack.api.command.user.backup.RestoreVolumeFromBackupAndAttachToVMCmd; import org.apache.cloudstack.api.command.user.backup.UpdateBackupScheduleCmd; +import 
org.apache.cloudstack.api.command.user.backup.repository.AddBackupRepositoryCmd; +import org.apache.cloudstack.api.command.user.backup.repository.DeleteBackupRepositoryCmd; +import org.apache.cloudstack.api.command.user.backup.repository.ListBackupRepositoriesCmd; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.dao.BackupOfferingDao; import org.apache.cloudstack.backup.dao.BackupScheduleDao; @@ -115,6 +120,7 @@ import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; import com.google.gson.Gson; @@ -147,6 +153,8 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { @Inject private DiskOfferingDao diskOfferingDao; @Inject + private UserVmDao userVmDao; + @Inject private ApiDispatcher apiDispatcher; @Inject private AsyncJobManager asyncJobManager; @@ -233,7 +241,11 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { SearchBuilder sb = backupOfferingDao.createSearchBuilder(); sb.and("zone_id", sb.entity().getZoneId(), SearchCriteria.Op.EQ); sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); - + CallContext ctx = CallContext.current(); + final Account caller = ctx.getCallingAccount(); + if (Account.Type.NORMAL == caller.getType()) { + sb.and("user_backups_allowed", sb.entity().isUserDrivenBackupAllowed(), SearchCriteria.Op.EQ); + } final SearchCriteria sc = sb.create(); if (zoneId != null) { @@ -243,6 +255,9 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { if (keyword != null) { sc.setParameters("name", "%" + keyword + "%"); } + if (Account.Type.NORMAL == caller.getType()) { + sc.setParameters("user_backups_allowed", true); + } Pair, Integer> result = backupOfferingDao.searchAndCount(sc, searchFilter); return new Pair<>(new 
ArrayList<>(result.first()), result.second()); } @@ -262,7 +277,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { return backupOfferingDao.remove(offering.getId()); } - private String createVolumeInfoFromVolumes(List vmVolumes) { + public static String createVolumeInfoFromVolumes(List vmVolumes) { List list = new ArrayList<>(); for (VolumeVO vol : vmVolumes) { list.add(new Backup.VolumeInfo(vol.getUuid(), vol.getPath(), vol.getVolumeType(), vol.getSize())); @@ -330,6 +345,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { backupProvider.getName(), backupProvider.getClass().getSimpleName(), e.getMessage()); logger.error(msg); logger.debug(msg, e); + return null; } return vm; } @@ -430,7 +446,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { throw new InvalidParameterValueException("Invalid schedule: " + cmd.getSchedule() + " for interval type: " + cmd.getIntervalType()); } - final BackupScheduleVO schedule = backupScheduleDao.findByVM(vmId); + final BackupScheduleVO schedule = backupScheduleDao.findByVMAndIntervalType(vmId, intervalType); if (schedule == null) { return backupScheduleDao.persist(new BackupScheduleVO(vmId, intervalType, scheduleString, timezoneId, nextDateTime)); } @@ -444,12 +460,12 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { } @Override - public BackupSchedule listBackupSchedule(final Long vmId) { + public List listBackupSchedule(final Long vmId) { final VMInstanceVO vm = findVmById(vmId); validateForZone(vm.getDataCenterId()); accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); - return backupScheduleDao.findByVM(vmId); + return backupScheduleDao.listByVM(vmId).stream().map(BackupSchedule.class::cast).collect(Collectors.toList()); } @Override @@ -607,12 +623,24 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { 
!vm.getState().equals(VirtualMachine.State.Destroyed)) { throw new CloudRuntimeException("Existing VM should be stopped before being restored from backup"); } - - final BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(vm.getBackupOfferingId()); - if (offering == null) { - throw new CloudRuntimeException("Failed to find backup offering of the VM backup."); + // This is done to handle historic backups if any with Veeam / Networker plugins + List backupVolumes = CollectionUtils.isNullOrEmpty(backup.getBackedUpVolumes()) ? + vm.getBackupVolumeList() : backup.getBackedUpVolumes(); + List vmVolumes = volumeDao.findByInstance(vm.getId()); + if (vmVolumes.size() != backupVolumes.size()) { + throw new CloudRuntimeException("Unable to restore VM with the current backup as the backup has different number of disks as the VM"); } + BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(vm.getBackupOfferingId()); + String errorMessage = "Failed to find backup offering of the VM backup."; + if (offering == null) { + logger.warn(errorMessage); + } + logger.debug("Attempting to get backup offering from VM backup"); + offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); + if (offering == null) { + throw new CloudRuntimeException(errorMessage); + } String backupDetailsInMessage = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "uuid", "externalId", "vmId", "type", "status", "date"); tryRestoreVM(backup, vm, offering, backupDetailsInMessage); updateVolumeState(vm, Volume.Event.RestoreSucceeded, Volume.State.Ready); @@ -749,26 +777,32 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { throw new CloudRuntimeException("VM reference for the provided VM backup not found"); } accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vmFromBackup); - - Pair restoreInfo = getRestoreVolumeHostAndDatastore(vm); - HostVO host = restoreInfo.first(); - 
StoragePoolVO datastore = restoreInfo.second(); - - logger.debug("Asking provider to restore volume " + backedUpVolumeUuid + " from backup " + backupId + - " (with external ID " + backup.getExternalId() + ") and attach it to VM: " + vm.getUuid()); - final BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); if (offering == null) { throw new CloudRuntimeException("Failed to find VM backup offering"); } BackupProvider backupProvider = getBackupProvider(offering.getProvider()); + VolumeVO backedUpVolume = volumeDao.findByUuid(backedUpVolumeUuid); + Pair restoreInfo; + if (!"nas".equals(offering.getProvider())) { + restoreInfo = getRestoreVolumeHostAndDatastore(vm); + } else { + restoreInfo = getRestoreVolumeHostAndDatastoreForNas(vm, backedUpVolume); + } + + HostVO host = restoreInfo.first(); + StoragePoolVO datastore = restoreInfo.second(); + + logger.debug("Asking provider to restore volume " + backedUpVolumeUuid + " from backup " + backupId + + " (with external ID " + backup.getExternalId() + ") and attach it to VM: " + vm.getUuid()); + logger.debug(String.format("Trying to restore volume using host private IP address: [%s].", host.getPrivateIpAddress())); String[] hostPossibleValues = {host.getPrivateIpAddress(), host.getName()}; String[] datastoresPossibleValues = {datastore.getUuid(), datastore.getName()}; - Pair result = restoreBackedUpVolume(backedUpVolumeUuid, backup, backupProvider, hostPossibleValues, datastoresPossibleValues); + Pair result = restoreBackedUpVolume(backedUpVolumeUuid, backup, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); if (BooleanUtils.isFalse(result.first())) { throw new CloudRuntimeException(String.format("Error restoring volume [%s] of VM [%s] to host [%s] using backup provider [%s] due to: [%s].", @@ -782,7 +816,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { } protected Pair restoreBackedUpVolume(final String backedUpVolumeUuid, 
final BackupVO backup, BackupProvider backupProvider, String[] hostPossibleValues, - String[] datastoresPossibleValues) { + String[] datastoresPossibleValues, VMInstanceVO vm) { Pair result = new Pair<>(false, ""); for (String hostData : hostPossibleValues) { for (String datastoreData : datastoresPossibleValues) { @@ -790,7 +824,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { backedUpVolumeUuid, hostData, datastoreData)); try { - result = backupProvider.restoreBackedUpVolume(backup, backedUpVolumeUuid, hostData, datastoreData); + result = backupProvider.restoreBackedUpVolume(backup, backedUpVolumeUuid, hostData, datastoreData, new Pair<>(vm.getName(), vm.getState())); if (BooleanUtils.isTrue(result.first())) { return result; @@ -843,6 +877,15 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { return new Pair<>(hostVO, storagePoolVO); } + private Pair getRestoreVolumeHostAndDatastoreForNas(VMInstanceVO vm, VolumeVO backedVolume) { + Long poolId = backedVolume.getPoolId(); + StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(poolId); + HostVO hostVO = vm.getHostId() == null ? 
+ getFirstHostFromStoragePool(storagePoolVO) : + hostDao.findById(vm.getHostId()); + return new Pair<>(hostVO, storagePoolVO); + } + /** * Find a host from storage pool access */ @@ -945,6 +988,9 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { cmdList.add(RestoreBackupCmd.class); cmdList.add(DeleteBackupCmd.class); cmdList.add(RestoreVolumeFromBackupAndAttachToVMCmd.class); + cmdList.add(AddBackupRepositoryCmd.class); + cmdList.add(DeleteBackupRepositoryCmd.class); + cmdList.add(ListBackupRepositoriesCmd.class); return cmdList; } diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupRepositoryServiceImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupRepositoryServiceImpl.java new file mode 100644 index 00000000000..5eb6538eaf5 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupRepositoryServiceImpl.java @@ -0,0 +1,114 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.backup; + +import com.cloud.user.AccountManager; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.api.command.user.backup.repository.AddBackupRepositoryCmd; +import org.apache.cloudstack.api.command.user.backup.repository.DeleteBackupRepositoryCmd; +import org.apache.cloudstack.api.command.user.backup.repository.ListBackupRepositoriesCmd; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.backup.dao.BackupRepositoryDao; +import org.apache.cloudstack.context.CallContext; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +public class BackupRepositoryServiceImpl extends ManagerBase implements BackupRepositoryService { + + @Inject + private BackupRepositoryDao repositoryDao; + @Inject + private BackupOfferingDao backupOfferingDao; + @Inject + private BackupDao backupDao; + @Inject + private AccountManager accountManager; + + @Override + public BackupRepository addBackupRepository(AddBackupRepositoryCmd cmd) { + BackupRepositoryVO repository = new BackupRepositoryVO(cmd.getZoneId(), cmd.getProvider(), cmd.getName(), + cmd.getType(), cmd.getAddress(), cmd.getMountOptions(), cmd.getCapacityBytes()); + return repositoryDao.persist(repository); + } + + @Override + public boolean deleteBackupRepository(DeleteBackupRepositoryCmd cmd) { + BackupRepositoryVO backupRepositoryVO = repositoryDao.findById(cmd.getId()); + if (Objects.isNull(backupRepositoryVO)) { + logger.debug("Backup repository appears to already be deleted"); + return false; + } + BackupOffering offeringVO = backupOfferingDao.findByExternalId(backupRepositoryVO.getUuid(), backupRepositoryVO.getZoneId()); + if 
(Objects.nonNull(offeringVO)) { + List backups = backupDao.listByOfferingId(offeringVO.getId()); + if (!backups.isEmpty()) { + throw new CloudRuntimeException("Failed to delete backup repository as there are backups present on it"); + } + } + return repositoryDao.remove(backupRepositoryVO.getId()); + } + + @Override + public Pair, Integer> listBackupRepositories(ListBackupRepositoriesCmd cmd) { + Long zoneId = accountManager.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), cmd.getZoneId()); + Long id = cmd.getId(); + String name = cmd.getName(); + String provider = cmd.getProvider(); + String keyword = cmd.getKeyword(); + + SearchBuilder sb = repositoryDao.createSearchBuilder(); + sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); + sb.and("zoneId", sb.entity().getZoneId(), SearchCriteria.Op.EQ); + sb.and("provider", sb.entity().getProvider(), SearchCriteria.Op.EQ); + + SearchCriteria sc = sb.create(); + if (keyword != null) { + SearchCriteria ssc = repositoryDao.createSearchCriteria(); + ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + ssc.addOr("provider", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + sc.addAnd("name", SearchCriteria.Op.SC, ssc); + } + if (Objects.nonNull(id)) { + sc.setParameters("id", id); + } + if (Objects.nonNull(name)) { + sc.setParameters("name", name); + } + if (Objects.nonNull(zoneId)) { + sc.setParameters("zoneId", zoneId); + } + if (Objects.nonNull(provider)) { + sc.setParameters("provider", provider); + } + + // search Store details by ids + Pair, Integer> repositoryVOPair = repositoryDao.searchAndCount(sc, null); + return new Pair<>(new ArrayList<>(repositoryVOPair.first()), repositoryVOPair.second()); + } +} diff --git a/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java b/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java index b5082760e68..d4ccac69d5f 100644 --- 
a/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/ca/CAManagerImpl.java @@ -24,6 +24,7 @@ import java.security.KeyStore; import java.security.KeyStoreException; import java.security.cert.CertificateExpiredException; import java.security.cert.CertificateNotYetValidException; +import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; import java.util.ArrayList; import java.util.Arrays; @@ -430,6 +431,14 @@ public class CAManagerImpl extends ManagerBase implements CAManager { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {CAProviderPlugin, CertKeySize, CertSignatureAlgorithm, CertValidityPeriod, AutomaticCertRenewal, AllowHostIPInSysVMAgentCert, CABackgroundJobDelay, CertExpiryAlertPeriod}; + return new ConfigKey[] {CAProviderPlugin, CertKeySize, CertSignatureAlgorithm, CertValidityPeriod, + AutomaticCertRenewal, AllowHostIPInSysVMAgentCert, CABackgroundJobDelay, CertExpiryAlertPeriod, + CertManagementCustomSubjectAlternativeName + }; + } + + @Override + public boolean isManagementCertificate(java.security.cert.Certificate certificate) throws CertificateParsingException { + return getConfiguredCaProvider().isManagementCertificate(certificate); } } diff --git a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java index 1a540bc2531..3f312e0ba3e 100644 --- a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java @@ -464,7 +464,7 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ Map requiresStorageMotion = hostsForMigrationOfVM.third(); for (Host destHost : compatibleDestinationHosts) { - if (!suitableDestinationHosts.contains(destHost)) { + if (!suitableDestinationHosts.contains(destHost) || 
cluster.getId() != destHost.getClusterId()) { continue; } Ternary metrics = algorithm.getMetrics(cluster.getId(), vm, diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java index 62bc50889f1..3e6c460a169 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java @@ -478,7 +478,8 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ private void cleanupOldDiagnosticFiles(DataStore store) { String mountPoint = null; - mountPoint = serviceImpl.mountManager.getMountPoint(store.getUri(), null); + mountPoint = serviceImpl.mountManager.getMountPoint(store.getUri(), + serviceImpl.imageStoreDetailsUtil.getNfsVersion(store.getId())); if (StringUtils.isNotBlank(mountPoint)) { File directory = new File(mountPoint + File.separator + DIAGNOSTICS_DIRECTORY); if (directory.isDirectory()) { diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java index ebe1d7fbc73..de66ad4d5e6 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java @@ -99,4 +99,9 @@ public class DiagnosticsDataObject implements DataObject { public Long getRefCount() { return null; } + + @Override + public String getName() { + return dataStore.getName(); + } } diff --git a/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java new file mode 100644 index 00000000000..bbad93737f1 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/network/RoutedIpv4ManagerImpl.java @@ -0,0 
+1,1621 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.network; + +import com.cloud.api.ApiDBUtils; +import com.cloud.bgp.BGPService; +import com.cloud.dc.DataCenter; +import com.cloud.domain.Domain; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; +import com.cloud.network.NetworkModel; +import com.cloud.network.dao.FirewallRulesDao; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkServiceMapDao; +import com.cloud.network.firewall.FirewallService; +import com.cloud.network.rules.FirewallManager; +import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.VpcOffering; +import com.cloud.network.vpc.dao.VpcDao; +import com.cloud.network.vpc.dao.VpcOfferingDao; +import com.cloud.network.vpc.dao.VpcOfferingServiceMapDao; +import 
com.cloud.offering.NetworkOffering; +import com.cloud.offerings.dao.NetworkOfferingDao; +import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; +import com.cloud.projects.Project; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLifecycleBase; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.NetUtils; + +import org.apache.cloudstack.api.command.admin.network.CreateIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.CreateIpv4SubnetForGuestNetworkCmd; +import org.apache.cloudstack.api.command.admin.network.DedicateIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.DeleteIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.DeleteIpv4SubnetForGuestNetworkCmd; +import org.apache.cloudstack.api.command.admin.network.ListIpv4SubnetsForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.ListIpv4SubnetsForGuestNetworkCmd; +import org.apache.cloudstack.api.command.admin.network.ReleaseDedicatedIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.UpdateIpv4SubnetForZoneCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.ChangeBgpPeersForNetworkCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.ChangeBgpPeersForVpcCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.CreateBgpPeerCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.DedicateBgpPeerCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.DeleteBgpPeerCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.ListBgpPeersCmd; +import org.apache.cloudstack.api.command.admin.network.bgp.ReleaseDedicatedBgpPeerCmd; 
+import org.apache.cloudstack.api.command.admin.network.bgp.UpdateBgpPeerCmd; +import org.apache.cloudstack.api.command.user.network.routing.CreateRoutingFirewallRuleCmd; +import org.apache.cloudstack.api.command.user.network.routing.DeleteRoutingFirewallRuleCmd; +import org.apache.cloudstack.api.command.user.network.routing.ListRoutingFirewallRulesCmd; +import org.apache.cloudstack.api.command.user.network.routing.UpdateRoutingFirewallRuleCmd; +import org.apache.cloudstack.api.response.BgpPeerResponse; +import org.apache.cloudstack.api.response.DataCenterIpv4SubnetResponse; +import org.apache.cloudstack.api.response.Ipv4SubnetForGuestNetworkResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; +import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnetVO; +import org.apache.cloudstack.datacenter.dao.DataCenterIpv4GuestSubnetDao; +import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap.State; +import org.apache.cloudstack.network.dao.BgpPeerDao; +import org.apache.cloudstack.network.dao.BgpPeerDetailsDao; +import org.apache.cloudstack.network.dao.BgpPeerNetworkMapDao; +import org.apache.cloudstack.network.dao.Ipv4GuestSubnetNetworkMapDao; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.EnumUtils; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + + +public class RoutedIpv4ManagerImpl extends ComponentLifecycleBase implements RoutedIpv4Manager { + + @Inject + DataCenterIpv4GuestSubnetDao dataCenterIpv4GuestSubnetDao; + @Inject + Ipv4GuestSubnetNetworkMapDao 
ipv4GuestSubnetNetworkMapDao; + @Inject + FirewallService firewallService; + @Inject + FirewallManager firewallManager; + @Inject + FirewallRulesDao firewallDao; + @Inject + NetworkServiceMapDao networkServiceMapDao; + @Inject + NetworkOfferingServiceMapDao networkOfferingServiceMapDao; + @Inject + NetworkOfferingDao networkOfferingDao; + @Inject + NetworkModel networkModel; + @Inject + AccountManager accountManager; + @Inject + VpcOfferingDao vpcOfferingDao; + @Inject + VpcOfferingServiceMapDao vpcOfferingServiceMapDao; + @Inject + VpcDao vpcDao; + @Inject + BgpPeerDao bgpPeerDao; + @Inject + BgpPeerDetailsDao bgpPeerDetailsDao; + @Inject + BgpPeerNetworkMapDao bgpPeerNetworkMapDao; + @Inject + NetworkDao networkDao; + @Inject + BGPService bgpService; + + @Override + public String getConfigComponentName() { + return RoutedIpv4Manager.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] { + RoutedNetworkIPv4MaxCidrSize, RoutedNetworkIPv4MinCidrSize, RoutedIPv4NetworkCidrAutoAllocationEnabled, + RoutedVpcIPv4MaxCidrSize, RoutedVpcIPv4MinCidrSize, UseSystemBgpPeers + }; + } + + @Override + public List> getCommands() { + final List> cmdList = new ArrayList>(); + cmdList.add(CreateIpv4SubnetForZoneCmd.class); + cmdList.add(DeleteIpv4SubnetForZoneCmd.class); + cmdList.add(ListIpv4SubnetsForZoneCmd.class); + cmdList.add(UpdateIpv4SubnetForZoneCmd.class); + cmdList.add(DedicateIpv4SubnetForZoneCmd.class); + cmdList.add(ReleaseDedicatedIpv4SubnetForZoneCmd.class); + cmdList.add(CreateIpv4SubnetForGuestNetworkCmd.class); + cmdList.add(ListIpv4SubnetsForGuestNetworkCmd.class); + cmdList.add(DeleteIpv4SubnetForGuestNetworkCmd.class); + cmdList.add(CreateRoutingFirewallRuleCmd.class); + cmdList.add(ListRoutingFirewallRulesCmd.class); + cmdList.add(UpdateRoutingFirewallRuleCmd.class); + cmdList.add(DeleteRoutingFirewallRuleCmd.class); + cmdList.add(CreateBgpPeerCmd.class); + cmdList.add(DeleteBgpPeerCmd.class); + 
cmdList.add(ListBgpPeersCmd.class); + cmdList.add(UpdateBgpPeerCmd.class); + cmdList.add(DedicateBgpPeerCmd.class); + cmdList.add(ReleaseDedicatedBgpPeerCmd.class); + cmdList.add(ChangeBgpPeersForNetworkCmd.class); + cmdList.add(ChangeBgpPeersForVpcCmd.class); + return cmdList; + } + + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ZONE_IP4_SUBNET_CREATE, + eventDescription = "Creating IPv4 subnet for a zone", + async = true, create = true) + public DataCenterIpv4GuestSubnet createDataCenterIpv4GuestSubnet(CreateIpv4SubnetForZoneCmd cmd) { + Long zoneId = cmd.getZoneId(); + String subnet = cmd.getSubnet(); + if (!NetUtils.isValidIp4Cidr(subnet)) { + throw new InvalidParameterValueException("Invalid IPv4 subnet: " + subnet); + } + + // check conflicts + List existingSubnets = dataCenterIpv4GuestSubnetDao.listByDataCenterId(zoneId); + checkConflicts(existingSubnets, subnet, null); + + Long domainId = cmd.getDomainId(); + final Long projectId = cmd.getProjectId(); + final String accountName = cmd.getAccountName(); + + Long accountId = null; + if (accountName != null || (projectId != null && projectId != -1L)) { + accountId = accountManager.finalyzeAccountId(accountName, domainId, projectId, false); + } + if (accountId != null) { + Account account = accountManager.getAccount(accountId); + domainId = account.getDomainId(); + } + + DataCenterIpv4GuestSubnetVO subnetVO = new DataCenterIpv4GuestSubnetVO(zoneId, NetUtils.transformCidr(subnet)); + if (domainId != null) { + subnetVO.setDomainId(domainId); + } + if (accountId != null) { + subnetVO.setAccountId(accountId); + } + subnetVO = dataCenterIpv4GuestSubnetDao.persist(subnetVO); + return subnetVO; + } + + @Override + public DataCenterIpv4SubnetResponse createDataCenterIpv4SubnetResponse(DataCenterIpv4GuestSubnet subnet) { + DataCenterIpv4SubnetResponse response = new DataCenterIpv4SubnetResponse(); + response.setCreated(subnet.getCreated()); + response.setSubnet(subnet.getSubnet()); + 
response.setId(subnet.getUuid()); + + DataCenter zone = ApiDBUtils.findZoneById(subnet.getDataCenterId()); + if (zone != null) { + response.setZoneId(zone.getUuid()); + response.setZoneName(zone.getName()); + } + + if (subnet.getDomainId() != null) { + Domain domain = ApiDBUtils.findDomainById(subnet.getDomainId()); + if (domain != null) { + response.setDomainId(domain.getUuid()); + response.setDomainName(domain.getName()); + } + } + + if (subnet.getAccountId() != null) { + Account account = ApiDBUtils.findAccountById(subnet.getAccountId()); + if (account != null) { + if (account.getType() == Account.Type.PROJECT) { + // find the project + Project project = ApiDBUtils.findProjectByProjectAccountId(account.getId()); + response.setProjectId(project.getUuid()); + response.setProjectName(project.getName()); + } else { + response.setAccountName(account.getAccountName()); + } + } + } + + response.setObjectName("zoneipv4subnet"); + return response; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ZONE_IP4_SUBNET_DELETE, + eventDescription = "Deleting IPv4 subnet for a zone", + async = true) + public boolean deleteDataCenterIpv4GuestSubnet(DeleteIpv4SubnetForZoneCmd cmd) { + // check if subnet is in use + Long subnetId = cmd.getId(); + List usedNetworks = ipv4GuestSubnetNetworkMapDao.listUsedByParent(subnetId); + if (CollectionUtils.isNotEmpty(usedNetworks)) { + throw new InvalidParameterValueException(String.format("The subnet is being used by %s guest networks.", usedNetworks.size())); + } + + // remove via dataCenterIpv4GuestSubnetDao and ipv4GuestSubnetNetworkMapDao + ipv4GuestSubnetNetworkMapDao.deleteByParentId(subnetId); + dataCenterIpv4GuestSubnetDao.remove(subnetId); + return true; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ZONE_IP4_SUBNET_UPDATE, + eventDescription = "Updating IPv4 subnet for a zone", + async = true) + public DataCenterIpv4GuestSubnet updateDataCenterIpv4GuestSubnet(UpdateIpv4SubnetForZoneCmd cmd) { + Long subnetId 
= cmd.getId(); + String newSubnet = cmd.getSubnet(); + DataCenterIpv4GuestSubnetVO subnetVO = dataCenterIpv4GuestSubnetDao.findById(subnetId); + if (subnetVO == null) { + throw new InvalidParameterValueException(String.format("Invalid subnet ID: %s", subnetId)); + } + + if (!NetUtils.isValidIp4Cidr(newSubnet)) { + throw new InvalidParameterValueException(String.format("Invalid IPv4 cidr: %s", newSubnet)); + } + + // check conflicts + List existingSubnets = dataCenterIpv4GuestSubnetDao.listByDataCenterId(subnetVO.getDataCenterId()); + checkConflicts(existingSubnets, newSubnet, subnetId); + + // check if subnet can be updated + List usedSubnets = ipv4GuestSubnetNetworkMapDao.listByParent(subnetId); + for (Ipv4GuestSubnetNetworkMap used : usedSubnets) { + if (!NetUtils.isNetworkAWithinNetworkB(used.getSubnet(), newSubnet)) { + throw new InvalidParameterValueException(String.format("Used subnet for guest network %s is not within new cidr: %s", used.getSubnet(), newSubnet)); + } + } + + // update via dataCenterIpv4GuestSubnetDao + DataCenterIpv4GuestSubnetVO subnet = dataCenterIpv4GuestSubnetDao.findById(subnetId); + subnet.setSubnet(NetUtils.transformCidr(newSubnet)); + dataCenterIpv4GuestSubnetDao.update(subnetId, subnet); + + return dataCenterIpv4GuestSubnetDao.findById(subnetId); + } + + private void checkConflicts(List existingSubnets, String newSubnet, Long ignoreSubnetId) { + for (DataCenterIpv4GuestSubnetVO existing : existingSubnets) { + if ((ignoreSubnetId == null || existing.getId() != ignoreSubnetId) && NetUtils.isNetworksOverlap(existing.getSubnet(), newSubnet)) { + throw new InvalidParameterValueException(String.format("Existing zone subnet %s has overlap with: %s", existing.getSubnet(), newSubnet)); + } + } + } + + @Override + public List listDataCenterIpv4GuestSubnets(ListIpv4SubnetsForZoneCmd cmd) { + Long id = cmd.getId(); + Long zoneId = cmd.getZoneId(); + String subnet = cmd.getSubnet(); + Long domainId = cmd.getDomainId(); + Long projectId = 
cmd.getProjectId(); + String accountName = cmd.getAccountName(); + + SearchCriteria sc = dataCenterIpv4GuestSubnetDao.createSearchCriteria(); + if (id != null) { + sc.addAnd("id", SearchCriteria.Op.EQ, id); + } + if (zoneId != null) { + sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); + } + if (subnet != null) { + sc.addAnd("subnet", SearchCriteria.Op.EQ, subnet); + } + if (domainId != null) { + sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); + } + if (accountName != null || (projectId != null && projectId != -1L)) { + Long accountId= accountManager.finalyzeAccountId(accountName, domainId, projectId, false); + sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId); + } + // search via dataCenterIpv4GuestSubnetDao + return dataCenterIpv4GuestSubnetDao.search(sc, null); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ZONE_IP4_SUBNET_DEDICATE, + eventDescription = "Dedicating IPv4 subnet for a zone to a domain or an account", + async = true) + public DataCenterIpv4GuestSubnet dedicateDataCenterIpv4GuestSubnet(DedicateIpv4SubnetForZoneCmd cmd) { + final Long id = cmd.getId(); + Long domainId = cmd.getDomainId(); + final Long projectId = cmd.getProjectId(); + final String accountName = cmd.getAccountName(); + + DataCenterIpv4GuestSubnetVO subnetVO = dataCenterIpv4GuestSubnetDao.findById(id); + if (subnetVO == null) { + throw new InvalidParameterValueException(String.format("Cannot find subnet with id: %s", id)); + } + Long accountId = null; + if (accountName != null || (projectId != null && projectId != -1L)) { + accountId = accountManager.finalyzeAccountId(accountName, domainId, projectId, false); + } + if (accountId != null) { + Account account = accountManager.getAccount(accountId); + domainId = account.getDomainId(); + } + + // Check if the guest subnet is used by other domain or account + if (domainId != null) { + List createdSubnets = ipv4GuestSubnetNetworkMapDao.listUsedByOtherDomains(id, domainId); + if 
(CollectionUtils.isNotEmpty(createdSubnets)) { + throw new InvalidParameterValueException(String.format("The subnet is being used by %s guest networks of other domains.", createdSubnets.size())); + } + } + if (accountId != null) { + List createdSubnets = ipv4GuestSubnetNetworkMapDao.listUsedByOtherAccounts(id, accountId); + if (CollectionUtils.isNotEmpty(createdSubnets)) { + throw new InvalidParameterValueException(String.format("The subnet is being used by %s guest networks of other accounts.", createdSubnets.size())); + } + } + + // update domain_id or account_id via dataCenterIpv4GuestSubnetDao to Mark the subnet as dedicated + subnetVO.setDomainId(domainId); + subnetVO.setAccountId(accountId); + dataCenterIpv4GuestSubnetDao.update(id, subnetVO); + return dataCenterIpv4GuestSubnetDao.findById(id); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ZONE_IP4_SUBNET_RELEASE, + eventDescription = "Releasing dedicated IPv4 subnet for a zone from a domain or an account", + async = true) + public DataCenterIpv4GuestSubnet releaseDedicatedDataCenterIpv4GuestSubnet(ReleaseDedicatedIpv4SubnetForZoneCmd cmd) { + final Long id = cmd.getId(); + DataCenterIpv4GuestSubnetVO subnetVO = dataCenterIpv4GuestSubnetDao.findById(id); + if (subnetVO == null) { + throw new InvalidParameterValueException(String.format("Cannot find subnet with id: %s", id)); + } + + // update domain_id and account_id to null via dataCenterIpv4GuestSubnetDao, to release the dedication + subnetVO.setDomainId(null); + subnetVO.setAccountId(null); + dataCenterIpv4GuestSubnetDao.update(id, subnetVO); + return dataCenterIpv4GuestSubnetDao.findById(id); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_IP4_GUEST_SUBNET_CREATE, + eventDescription = "Creating IPv4 subnet for guest network", + async = true, create = true) + public Ipv4GuestSubnetNetworkMap createIpv4SubnetForGuestNetwork(CreateIpv4SubnetForGuestNetworkCmd cmd) { + if (ObjectUtils.allNull(cmd.getSubnet(), cmd.getCidrSize())) { 
+ throw new InvalidParameterValueException("One of subnet and cidrsize must be specified"); + } + if (ObjectUtils.allNotNull(cmd.getSubnet(), cmd.getCidrSize())) { + throw new InvalidParameterValueException("subnet and cidrsize are mutually exclusive"); + } + DataCenterIpv4GuestSubnet parent = dataCenterIpv4GuestSubnetDao.findById(cmd.getParentId()); + if (parent == null) { + throw new InvalidParameterValueException("the parent subnet is invalid"); + } + if (cmd.getSubnet() != null) { + return createIpv4SubnetFromParentSubnet(parent, cmd.getSubnet()); + } else if (cmd.getCidrSize() != null) { + return createIpv4SubnetFromParentSubnet(parent, cmd.getCidrSize()); + } + return null; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_IP4_GUEST_SUBNET_DELETE, + eventDescription = "Deleting IPv4 subnet for guest network", + async = true) + public boolean deleteIpv4SubnetForGuestNetwork(DeleteIpv4SubnetForGuestNetworkCmd cmd) { + Long mapId = cmd.getId(); + Ipv4GuestSubnetNetworkMapVO mapVO = ipv4GuestSubnetNetworkMapDao.findById(mapId); + if (mapVO == null) { + return true; + } + // check if the subnet is not in use + if (!State.Free.equals(mapVO.getState()) || mapVO.getNetworkId() != null) { + throw new InvalidParameterValueException("Cannot delete the subnet which is in use"); + } + return ipv4GuestSubnetNetworkMapDao.remove(mapId); + } + + @Override + public void releaseIpv4SubnetForGuestNetwork(long networkId) { + // check if the network has corresponding subnet + Ipv4GuestSubnetNetworkMapVO mapVO = ipv4GuestSubnetNetworkMapDao.findByNetworkId(networkId); + if (mapVO == null) { + return; + } + releaseIpv4SubnetForGuestNetworkOrVpcInternal(mapVO); + } + + @Override + public void releaseIpv4SubnetForVpc(long vpcId) { + // check if the network has corresponding subnet + Ipv4GuestSubnetNetworkMapVO mapVO = ipv4GuestSubnetNetworkMapDao.findByVpcId(vpcId); + if (mapVO == null) { + return; + } + releaseIpv4SubnetForGuestNetworkOrVpcInternal(mapVO); + } + + 
private void releaseIpv4SubnetForGuestNetworkOrVpcInternal(Ipv4GuestSubnetNetworkMapVO mapVO) { + ipv4GuestSubnetNetworkMapDao.remove(mapVO.getId()); + } + + @Override + public List listIpv4GuestSubnetsForGuestNetwork(ListIpv4SubnetsForGuestNetworkCmd cmd) { + Long id = cmd.getId(); + Long zoneId = cmd.getZoneId(); + Long parentId = cmd.getParentId(); + String subnet = cmd.getSubnet(); + String keyword = cmd.getKeyword(); + Long networkId = cmd.getNetworkId(); + Long vpcId = cmd.getVpcId(); + + SearchCriteria sc = ipv4GuestSubnetNetworkMapDao.createSearchCriteria(); + if (id != null) { + sc.addAnd("id", SearchCriteria.Op.EQ, id); + } + if (zoneId != null) { + List subnets = dataCenterIpv4GuestSubnetDao.listByDataCenterId(zoneId); + if (CollectionUtils.isEmpty(subnets)) { + return new ArrayList<>(); + } + List parentIds = subnets.stream().map(DataCenterIpv4GuestSubnetVO::getId).collect(Collectors.toList()); + sc.addAnd("parentId", SearchCriteria.Op.IN, parentIds.toArray()); + } + if (parentId != null) { + sc.addAnd("parentId", SearchCriteria.Op.EQ, parentId); + } + if (subnet != null) { + sc.addAnd("subnet", SearchCriteria.Op.EQ, subnet); + } + if (networkId != null) { + sc.addAnd("networkId", SearchCriteria.Op.EQ, networkId); + } + if (vpcId != null) { + sc.addAnd("vpcId", SearchCriteria.Op.EQ, vpcId); + } + if (keyword != null) { + sc.addAnd("subnet", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + } + + return ipv4GuestSubnetNetworkMapDao.search(sc, null); + } + + @Override + public Ipv4SubnetForGuestNetworkResponse createIpv4SubnetForGuestNetworkResponse(Ipv4GuestSubnetNetworkMap subnet) { + Ipv4SubnetForGuestNetworkResponse response = new Ipv4SubnetForGuestNetworkResponse(); + + response.setCreated(subnet.getCreated()); + response.setSubnet(subnet.getSubnet()); + response.setState(subnet.getState().name()); + response.setId(subnet.getUuid()); + response.setAllocatedTime(subnet.getAllocated()); + Long zoneId = null; + if (subnet.getNetworkId() != null) { + 
Network network = ApiDBUtils.findNetworkById(subnet.getNetworkId()); + response.setNetworkId(network.getUuid()); + response.setNetworkName(network.getName()); + zoneId = network.getDataCenterId(); + } + if (subnet.getVpcId() != null) { + Vpc vpc = ApiDBUtils.findVpcById(subnet.getVpcId()); + response.setVpcId(vpc.getUuid()); + response.setVpcName(vpc.getName()); + zoneId = vpc.getZoneId(); + } + if (subnet.getParentId() != null) { + DataCenterIpv4GuestSubnet parent = dataCenterIpv4GuestSubnetDao.findById(subnet.getParentId()); + if (parent != null) { + response.setParentId(parent.getUuid()); + response.setParentSubnet(parent.getSubnet()); + zoneId = parent.getDataCenterId(); + } + } else if (subnet.getNetworkId() != null) { + Network network = ApiDBUtils.findNetworkById(subnet.getNetworkId()); + if (network != null) { + zoneId = network.getDataCenterId(); + } + } + if (zoneId != null) { + DataCenter zone = ApiDBUtils.findZoneById(zoneId); + if (zone != null) { + response.setZoneId(zone.getUuid()); + response.setZoneName(zone.getName()); + } + } + response.setObjectName("ipv4subnetforguestnetwork"); + return response; + } + + @Override + public void getOrCreateIpv4SubnetForGuestNetwork(Network network, String networkCidr) { + getOrCreateIpv4SubnetForGuestNetworkOrVpcInternal(networkCidr, network.getDomainId(), network.getAccountId(), network.getDataCenterId()); + } + + @Override + public Ipv4GuestSubnetNetworkMap getOrCreateIpv4SubnetForGuestNetwork(Long domainId, Long accountId, Long zoneId, Integer networkCidrSize) { + return getOrCreateIpv4SubnetForGuestNetworkOrVpcInternal(networkCidrSize, domainId, accountId, zoneId); + } + + @Override + public void getOrCreateIpv4SubnetForVpc(Vpc vpc, String networkCidr) { + getOrCreateIpv4SubnetForGuestNetworkOrVpcInternal(networkCidr, vpc.getDomainId(), vpc.getAccountId(), vpc.getZoneId()); + } + + @Override + public Ipv4GuestSubnetNetworkMap getOrCreateIpv4SubnetForVpc(Vpc vpc, Integer vpcCidrSize) { + return 
getOrCreateIpv4SubnetForGuestNetworkOrVpcInternal(vpcCidrSize, vpc.getDomainId(), vpc.getAccountId(), vpc.getZoneId()); + } + + private Ipv4GuestSubnetNetworkMap getOrCreateIpv4SubnetForGuestNetworkOrVpcInternal(Integer cidrSize, Long ownerDomainId, Long ownerAccountId, Long zoneId) { + validateNetworkCidrSize(ownerAccountId, cidrSize); + List subnets = getZoneSubnetsForAccount(ownerDomainId, ownerAccountId, zoneId); + for (DataCenterIpv4GuestSubnetVO subnet : subnets) { + Ipv4GuestSubnetNetworkMap result = getOrCreateIpv4SubnetForGuestNetworkOrVpcInternal(cidrSize, subnet); + if (result != null) { + return result; + } + } + return null; + } + + private Ipv4GuestSubnetNetworkMap getOrCreateIpv4SubnetForGuestNetworkOrVpcInternal(Integer cidrSize, DataCenterIpv4GuestSubnetVO subnet) { + Ipv4GuestSubnetNetworkMap map = ipv4GuestSubnetNetworkMapDao.findFirstAvailable(subnet.getId(), cidrSize); + if (map != null) { + return map; + } + try { + return createIpv4SubnetFromParentSubnet(subnet, cidrSize); + } catch (Exception ex) { + logger.debug("Failed to create Ipv4 subnet from parent subnet {}: {}", subnet.getSubnet(), ex.getMessage()); + } + return null; + } + + private void getOrCreateIpv4SubnetForGuestNetworkOrVpcInternal(String networkCidr, Long ownerDomainId, Long ownerAccountId, Long zoneId) { + Ipv4GuestSubnetNetworkMapVO subnetMap = ipv4GuestSubnetNetworkMapDao.findBySubnet(networkCidr); + if (subnetMap != null) { + // check if the subnet is in use + if (subnetMap.getNetworkId() != null || subnetMap.getVpcId() != null) { + throw new InvalidParameterValueException("The subnet is in use"); + } + // check if the subnet accessible by the owner + if (subnetMap.getParentId() != null) { + DataCenterIpv4GuestSubnetVO parent = dataCenterIpv4GuestSubnetDao.findById(subnetMap.getParentId()); + checkIfNetworkOwnerCanAccessIpv4Subnet(parent, ownerDomainId, ownerAccountId); + } + return; + } + + DataCenterIpv4GuestSubnet parent = getParentOfNetworkCidr(zoneId, networkCidr); + 
if (parent != null) { + // check if the parent subnet is accessible by the owner + checkIfNetworkOwnerCanAccessIpv4Subnet(parent, ownerDomainId, ownerAccountId); + } + + // Create new record without networkId + final Long parentId = parent != null ? parent.getId() : null; + subnetMap = new Ipv4GuestSubnetNetworkMapVO(parentId, NetUtils.transformCidr(networkCidr), null, State.Free); + ipv4GuestSubnetNetworkMapDao.persist(subnetMap); + } + + private void checkIfNetworkOwnerCanAccessIpv4Subnet(DataCenterIpv4GuestSubnet parent, Long ownerDomainId, Long ownerAccountId) { + if (parent != null + && ((parent.getDomainId() != null && !parent.getDomainId().equals(ownerDomainId)) + || (parent.getAccountId() != null && !parent.getAccountId().equals(ownerAccountId)))) { + throw new InvalidParameterValueException("The owner of the network has no permission to access the subnet"); + } + } + + private DataCenterIpv4GuestSubnet getParentOfNetworkCidr(Long zoneId, String networkCidr) { + List existingSubnets = dataCenterIpv4GuestSubnetDao.listByDataCenterId(zoneId); + for (DataCenterIpv4GuestSubnetVO existing : existingSubnets) { + if (NetUtils.isNetworkAWithinNetworkB(networkCidr, existing.getSubnet())) { + // check conflicts + List subnetsForNetwork = ipv4GuestSubnetNetworkMapDao.listByParent(existing.getId()); + checkConflicts(subnetsForNetwork, networkCidr); + return existing; + } + if (NetUtils.isNetworksOverlap(existing.getSubnet(), networkCidr)) { + throw new InvalidParameterValueException(String.format("Existing zone subnet %s has overlap with: %s", existing.getSubnet(), networkCidr)); + } + } + // check conflicts + List subnetsForNetworkNoParents = ipv4GuestSubnetNetworkMapDao.listAllNoParent(); + checkConflicts(subnetsForNetworkNoParents, networkCidr); + return null; + } + + private void checkConflicts(List subnetsForNetwork, String networkCidr) { + for (Ipv4GuestSubnetNetworkMapVO subnetForNetwork : subnetsForNetwork) { + if 
(NetUtils.isNetworksOverlap(subnetForNetwork.getSubnet(), networkCidr)) { + throw new InvalidParameterValueException(String.format("Existing subnet %s has overlap with: %s", subnetForNetwork.getSubnet(), networkCidr)); + } + } + } + + private void validateNetworkCidrSize(long accountId, Integer networkCidrSize) { + if (networkCidrSize == null) { + throw new CloudRuntimeException("network/vpc CidrSize is null"); + } + Boolean isAutoAllocationEnabled = RoutedIPv4NetworkCidrAutoAllocationEnabled.valueIn(accountId); + if (!Boolean.TRUE.equals(isAutoAllocationEnabled)) { + throw new CloudRuntimeException("CIDR auto-allocation is disabled for this account"); + } + } + + private List getZoneSubnetsForAccount(long domainId, long accountId, long zoneId) { + // Get dedicated guest subnets for the account + List subnets = dataCenterIpv4GuestSubnetDao.listByDataCenterIdAndAccountId(zoneId, accountId); + subnets.addAll(dataCenterIpv4GuestSubnetDao.listByDataCenterIdAndDomainId(zoneId, domainId)); + // Get non-dedicated zone guest subnets for the account + subnets.addAll(dataCenterIpv4GuestSubnetDao.listNonDedicatedByDataCenterId(zoneId)); + return subnets; + } + + private Ipv4GuestSubnetNetworkMap createIpv4SubnetFromParentSubnet(DataCenterIpv4GuestSubnet parent, Integer networkCidrSize) { + DataCenterIpv4GuestSubnetVO subnetVO = dataCenterIpv4GuestSubnetDao.findById(parent.getId()); + if (subnetVO == null) { + throw new InvalidParameterValueException(String.format("Invalid subnet ID: %s", parent.getId())); + } + // Order subnets by start IP + List existingSubnets = ipv4GuestSubnetNetworkMapDao.listByParent(parent.getId()); + Collections.sort(existingSubnets, (subnet1, subnet2) -> { + Long ip1 = NetUtils.ip2Long(subnet1.getSubnet().split("/")[0]); + Long ip2 = NetUtils.ip2Long(subnet2.getSubnet().split("/")[0]); + return ip1.compareTo(ip2); + }); + // get all free IP ranges + final List> freeIpranges = new ArrayList<>(); + final long[] parentSubnetIpRange = 
NetUtils.getIpRangeStartIpAndEndIpFromCidr(parent.getSubnet()); + long startIp = parentSubnetIpRange[0]; + for (Ipv4GuestSubnetNetworkMapVO subnet : existingSubnets) { + long[] subnetIpRange = NetUtils.getIpRangeStartIpAndEndIpFromCidr(subnet.getSubnet()); + if (startIp < subnetIpRange[0]) { + freeIpranges.add(new Pair<>(startIp, subnetIpRange[0] -1)); + } + startIp = subnetIpRange[1] + 1; + } + if (startIp <= parentSubnetIpRange[1]) { + freeIpranges.add(new Pair<>(startIp, parentSubnetIpRange[1])); + } + // split the IP ranges into list of subnet + final List> subnetsInFreeIpRanges = new ArrayList<>(); + for (Pair freeIpRange : freeIpranges) { + subnetsInFreeIpRanges.addAll(NetUtils.splitIpRangeIntoSubnets(freeIpRange.first(), freeIpRange.second())); + } + + // Allocate a subnet automatically + String networkCidr = getFreeNetworkCidr(subnetsInFreeIpRanges, networkCidrSize); + if (networkCidr == null) { + throw new CloudRuntimeException("Failed to automatically allocate a subnet with specified cidrsize"); + } + // create DB record + Ipv4GuestSubnetNetworkMapVO subnetMap = new Ipv4GuestSubnetNetworkMapVO(parent.getId(), NetUtils.transformCidr(networkCidr), null, State.Free); + return ipv4GuestSubnetNetworkMapDao.persist(subnetMap); + } + + private String getFreeNetworkCidr(List> subnetsInFreeIpRanges, int networkCidrSize) { + for (int cidrSize = networkCidrSize; cidrSize >= 1; cidrSize--) { + for (Pair freeSubnet : subnetsInFreeIpRanges) { + if (freeSubnet.second().equals(cidrSize)) { + String networkCidr = String.format("%s/%s", NetUtils.long2Ip(freeSubnet.first()), networkCidrSize); + if (ipv4GuestSubnetNetworkMapDao.findBySubnet(networkCidr) == null) { + return networkCidr; + } + } + } + } + return null; + } + + private Ipv4GuestSubnetNetworkMap createIpv4SubnetFromParentSubnet(DataCenterIpv4GuestSubnet parent, String networkCidr) { + // Validate the network cidr + if (!NetUtils.isNetworkAWithinNetworkB(networkCidr, parent.getSubnet())) { + throw new 
InvalidParameterValueException(String.format("networkCidr %s is not within parent cidr: %s", networkCidr, parent.getSubnet())); + } + // check conflicts + List existingSubnets = ipv4GuestSubnetNetworkMapDao.listByParent(parent.getId()); + checkConflicts(existingSubnets, networkCidr); + + // create DB record + Ipv4GuestSubnetNetworkMapVO subnetMap = new Ipv4GuestSubnetNetworkMapVO(parent.getId(), NetUtils.transformCidr(networkCidr), null, State.Free); + return ipv4GuestSubnetNetworkMapDao.persist(subnetMap); + } + + @Override + public void assignIpv4SubnetToNetwork(Network network) { + if (network == null || network.getCidr() == null) { + return; + } + Ipv4GuestSubnetNetworkMapVO subnetMap = ipv4GuestSubnetNetworkMapDao.findBySubnet(network.getCidr()); + if (subnetMap != null) { + if (network.getId() > 0L) { + subnetMap.setNetworkId(network.getId()); + } + subnetMap.setState(State.Allocated); + subnetMap.setAllocated(new Date()); + ipv4GuestSubnetNetworkMapDao.update(subnetMap.getId(), subnetMap); + } + } + + @Override + public void assignIpv4SubnetToVpc(Vpc vpc) { + if (vpc == null || vpc.getCidr() == null) { + return; + } + Ipv4GuestSubnetNetworkMapVO subnetMap = ipv4GuestSubnetNetworkMapDao.findBySubnet(vpc.getCidr()); + if (subnetMap != null) { + if (vpc != null && vpc.getId() > 0L) { + subnetMap.setVpcId(vpc.getId()); + } + subnetMap.setState(State.Allocated); + subnetMap.setAllocated(new Date()); + ipv4GuestSubnetNetworkMapDao.update(subnetMap.getId(), subnetMap); + } + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_CREATE, + eventDescription = "Creating routing firewall rule", async = true) + public FirewallRule createRoutingFirewallRule(CreateRoutingFirewallRuleCmd createRoutingFirewallRuleCmd) throws NetworkRuleConflictException { + final Account caller = CallContext.current().getCallingAccount(); + final long networkId = createRoutingFirewallRuleCmd.getNetworkId(); + final Integer portStart = 
createRoutingFirewallRuleCmd.getSourcePortStart(); + final Integer portEnd = createRoutingFirewallRuleCmd.getSourcePortEnd(); + final FirewallRule.TrafficType trafficType = createRoutingFirewallRuleCmd.getTrafficType(); + final String protocol = createRoutingFirewallRuleCmd.getProtocol(); + final Integer icmpCode = createRoutingFirewallRuleCmd.getIcmpCode(); + final Integer icmpType = createRoutingFirewallRuleCmd.getIcmpType(); + final boolean forDisplay = createRoutingFirewallRuleCmd.isDisplay(); + final FirewallRule.FirewallRuleType type = FirewallRule.FirewallRuleType.User; + final List sourceCidrList = createRoutingFirewallRuleCmd.getSourceCidrList(); + final List destinationCidrList = createRoutingFirewallRuleCmd.getDestinationCidrList(); + + for (String cidr : sourceCidrList) { + if (!NetUtils.isValidIp4Cidr(cidr)) { + throw new InvalidParameterValueException(String.format("Invalid source IPv4 CIDR: %s", cidr)); + } + } + for (String cidr : destinationCidrList) { + if (!NetUtils.isValidIp4Cidr(cidr)) { + throw new InvalidParameterValueException(String.format("Invalid destination IPv4 CIDR: %s", cidr)); + } + } + if (portStart != null && !NetUtils.isValidPort(portStart)) { + throw new InvalidParameterValueException("publicPort is an invalid value: " + portStart); + } + if (portEnd != null && !NetUtils.isValidPort(portEnd)) { + throw new InvalidParameterValueException("Public port range is an invalid value: " + portEnd); + } + if (ObjectUtils.allNotNull(portStart, portEnd) && portStart > portEnd) { + throw new InvalidParameterValueException("Start port can't be bigger than end port"); + } + + Network network = networkModel.getNetwork(networkId); + assert network != null : "Can't create rule as network is null?"; + + final long accountId = network.getAccountId(); + final long domainId = network.getDomainId(); + + accountManager.checkAccess(caller, null, true, network); + + // Verify that the network guru supports the protocol specified + Map caps = 
networkModel.getNetworkServiceCapabilities(network.getId(), Network.Service.Firewall); + + if (caps != null) { + String supportedProtocols; + String supportedTrafficTypes = null; + supportedTrafficTypes = caps.get(Network.Capability.SupportedTrafficDirection).toLowerCase(); + + if (trafficType == FirewallRule.TrafficType.Egress) { + supportedProtocols = caps.get(Network.Capability.SupportedEgressProtocols).toLowerCase(); + } else { + supportedProtocols = caps.get(Network.Capability.SupportedProtocols).toLowerCase(); + } + + if (!supportedProtocols.contains(protocol.toLowerCase())) { + throw new InvalidParameterValueException(String.format("Protocol %s is not supported in zone", protocol)); + } else if (!supportedTrafficTypes.contains(trafficType.toString().toLowerCase())) { + throw new InvalidParameterValueException("Traffic Type " + trafficType + " is currently supported by Firewall in network " + networkId); + } + } + + // icmp code and icmp type can't be passed in for any other protocol rather than icmp + if (!protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (icmpCode != null || icmpType != null)) { + throw new InvalidParameterValueException("Can specify icmpCode and icmpType for ICMP protocol only"); + } + + if (protocol.equalsIgnoreCase(NetUtils.ICMP_PROTO) && (portStart != null || portEnd != null)) { + throw new InvalidParameterValueException("Can't specify start/end port when protocol is ICMP"); + } + + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public FirewallRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + FirewallRuleVO newRule = + new FirewallRuleVO(null, null, portStart, portEnd, protocol.toLowerCase(), networkId, accountId, domainId, FirewallRule.Purpose.Firewall, + sourceCidrList, destinationCidrList, icmpCode, icmpType, null, trafficType); + newRule.setType(type); + newRule.setDisplay(forDisplay); + newRule = firewallDao.persist(newRule); + + if 
(FirewallRule.FirewallRuleType.User.equals(type)) { + firewallManager.detectRulesConflict(newRule); + } + + if (!firewallDao.setStateToAdd(newRule)) { + throw new CloudRuntimeException("Unable to update the state to add for " + newRule); + } + CallContext.current().setEventDetails("Rule Id: " + newRule.getId()); + + return newRule; + } + }); + } + + @Override + public Pair, Integer> listRoutingFirewallRules(ListRoutingFirewallRulesCmd listRoutingFirewallRulesCmd) { + return firewallService.listFirewallRules(listRoutingFirewallRulesCmd); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_UPDATE, + eventDescription = "Updating routing firewall rule", async = true) + public FirewallRule updateRoutingFirewallRule(UpdateRoutingFirewallRuleCmd updateRoutingFirewallRuleCmd) { + final long id = updateRoutingFirewallRuleCmd.getId(); + final boolean forDisplay = updateRoutingFirewallRuleCmd.isDisplay(); + FirewallRuleVO rule = firewallDao.findById(id); + if (rule == null) { + throw new InvalidParameterValueException(String.format("Unable to find routing firewall rule with id %d", id)); + } + if (FirewallRule.TrafficType.Ingress.equals(rule.getTrafficType())) { + return firewallManager.updateIngressFirewallRule(rule.getId(), null, forDisplay); + } + return firewallManager.updateEgressFirewallRule(rule.getId(), null, forDisplay); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_ROUTING_IPV4_FIREWALL_RULE_DELETE, + eventDescription = "revoking routing firewall rule", async = true) + public boolean revokeRoutingFirewallRule(Long id) { + FirewallRuleVO rule = firewallDao.findById(id); + if (rule == null) { + throw new InvalidParameterValueException(String.format("Unable to find routing firewall rule with id %d", id)); + } + if (FirewallRule.TrafficType.Ingress.equals(rule.getTrafficType())) { + return firewallManager.revokeIngressFirewallRule(rule.getId(), true); + } + return firewallManager.revokeEgressFirewallRule(rule.getId(), 
true); + } + + @Override + public boolean applyRoutingFirewallRule(long id) { + FirewallRuleVO rule = firewallDao.findById(id); + if (rule == null) { + logger.error(String.format("Unable to find routing firewall rule with ID: %d", id)); + return false; + } + if (!FirewallRule.Purpose.Firewall.equals(rule.getPurpose())) { + logger.error(String.format("Cannot apply routing firewall rule with ID: %d as purpose %s is not %s", id, rule.getPurpose(), FirewallRule.Purpose.Firewall)); + } + logger.debug(String.format("Applying routing firewall rules for rule with ID: %s", rule.getUuid())); + List rules = firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Egress); + rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), rule.getPurpose(), FirewallRule.TrafficType.Ingress)); + return firewallManager.applyFirewallRules(rules, false, CallContext.current().getCallingAccount()); + } + + @Override + public boolean isVirtualRouterGateway(Network network) { + return isRoutedNetwork(network) + && (networkServiceMapDao.canProviderSupportServiceInNetwork(network.getId(), Service.Gateway, Provider.VirtualRouter)) + || networkServiceMapDao.canProviderSupportServiceInNetwork(network.getId(), Service.Gateway, Provider.VPCVirtualRouter); + } + + @Override + public boolean isVirtualRouterGateway(NetworkOffering networkOffering) { + return NetworkOffering.NetworkMode.ROUTED.equals(networkOffering.getNetworkMode()) + && networkOfferingServiceMapDao.canProviderSupportServiceInNetworkOffering(networkOffering.getId(), Service.Gateway, Provider.VirtualRouter) + || networkOfferingServiceMapDao.canProviderSupportServiceInNetworkOffering(networkOffering.getId(), Service.Gateway, Provider.VPCVirtualRouter); + } + + @Override + public boolean isRoutedNetwork(Network network) { + return NetworkOffering.NetworkMode.ROUTED.equals(networkOfferingDao.findById(network.getNetworkOfferingId()).getNetworkMode()); + } + + @Override 
+ public boolean isDynamicRoutedNetwork(Network network) { + NetworkOffering networkOffering = networkOfferingDao.findById(network.getNetworkOfferingId()); + return isDynamicRoutedNetwork(networkOffering); + } + + @Override + public boolean isDynamicRoutedNetwork(NetworkOffering networkOffering) { + return NetworkOffering.NetworkMode.ROUTED.equals(networkOffering.getNetworkMode()) + && NetworkOffering.RoutingMode.Dynamic.equals(networkOffering.getRoutingMode()); + } + + @Override + public boolean isRoutedVpc(Vpc vpc) { + return NetworkOffering.NetworkMode.ROUTED.equals(vpcOfferingDao.findById(vpc.getVpcOfferingId()).getNetworkMode()); + } + + @Override + public boolean isDynamicRoutedVpc(Vpc vpc) { + VpcOffering vpcOffering = vpcOfferingDao.findById(vpc.getVpcOfferingId()); + return isDynamicRoutedVpc(vpcOffering); + } + + @Override + public boolean isDynamicRoutedVpc(VpcOffering vpcOffering) { + return NetworkOffering.NetworkMode.ROUTED.equals(vpcOffering.getNetworkMode()) + && NetworkOffering.RoutingMode.Dynamic.equals(vpcOffering.getRoutingMode()); + } + + @Override + public boolean isVpcVirtualRouterGateway(VpcOffering vpcOffering) { + return NetworkOffering.NetworkMode.ROUTED.equals(vpcOffering.getNetworkMode()) + && vpcOfferingServiceMapDao.findByServiceProviderAndOfferingId(Service.Gateway.getName(), Provider.VPCVirtualRouter.getName(), vpcOffering.getId()) != null; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_BGP_PEER_CREATE, + eventDescription = "Creating BGP Peer", + async = true, create = true) + public BgpPeer createBgpPeer(CreateBgpPeerCmd createBgpPeerCmd) { + Long zoneId = createBgpPeerCmd.getZoneId(); + Long asNumber = createBgpPeerCmd.getAsNumber(); + String ip4Address = createBgpPeerCmd.getIp4Address(); + String ip6Address = createBgpPeerCmd.getIp6Address(); + String password = createBgpPeerCmd.getPassword(); + Map detailsStr = createBgpPeerCmd.getDetails(); + + if (ObjectUtils.allNull(ip4Address, ip6Address)) { + throw new 
InvalidParameterValueException("At least one of IPv4 and IPv6 address must be specified."); + } + + if (ip4Address != null) { + if (!NetUtils.isValidIp4(ip4Address)) { + throw new InvalidParameterValueException("IPv4 address is not valid."); + } + if (bgpPeerDao.findByZoneAndAsNumberAndAddress(zoneId, asNumber, ip4Address, null) != null) { + throw new InvalidParameterValueException("There is already a BGP peer with same IPv4 address and AS number in the zone."); + } + } + + if (ip6Address != null) { + if (!NetUtils.isValidIp6(ip6Address)) { + throw new InvalidParameterValueException("IPv6 address is not valid."); + } + if (bgpPeerDao.findByZoneAndAsNumberAndAddress(zoneId, asNumber, null, ip6Address) != null) { + throw new InvalidParameterValueException("There is already a BGP peer with same IPv6 address and AS number in the zone."); + } + } + + final Map details = new HashMap<>(); + if (detailsStr != null) { + for (final String detailStr : detailsStr.keySet()) { + BgpPeer.Detail bgpPeerDetail = EnumUtils.getEnumIgnoreCase(BgpPeer.Detail.class, detailStr); + if (bgpPeerDetail == null) { + throw new InvalidParameterValueException("Unsupported BGP peer detail " + detailStr); + } + details.put(bgpPeerDetail, detailsStr.get(detailStr)); + } + } + + Long domainId = createBgpPeerCmd.getDomainId(); + final Long projectId = createBgpPeerCmd.getProjectId(); + final String accountName = createBgpPeerCmd.getAccountName(); + + Long accountId = null; + if (accountName != null || (projectId != null && projectId != -1L)) { + accountId = accountManager.finalyzeAccountId(accountName, domainId, projectId, false); + } + if (accountId != null) { + Account account = accountManager.getAccount(accountId); + domainId = account.getDomainId(); + } + + BgpPeerVO bgpPeerVO = new BgpPeerVO(zoneId, ip4Address, ip6Address, asNumber, password); + if (domainId != null) { + bgpPeerVO.setDomainId(domainId); + } + if (accountId != null) { + bgpPeerVO.setAccountId(accountId); + } + bgpPeerVO = 
bgpPeerDao.persist(bgpPeerVO, details); + return bgpPeerVO; + } + + @Override + public BgpPeerResponse createBgpPeerResponse(BgpPeer bgpPeer) { + BgpPeerResponse response = new BgpPeerResponse(); + response.setCreated(bgpPeer.getCreated()); + response.setAsNumber(bgpPeer.getAsNumber()); + response.setId(bgpPeer.getUuid()); + response.setIp4Address(bgpPeer.getIp4Address()); + response.setIp6Address(bgpPeer.getIp6Address()); + + DataCenter zone = ApiDBUtils.findZoneById(bgpPeer.getDataCenterId()); + if (zone != null) { + response.setZoneId(zone.getUuid()); + response.setZoneName(zone.getName()); + } + + if (bgpPeer.getDomainId() != null) { + Domain domain = ApiDBUtils.findDomainById(bgpPeer.getDomainId()); + if (domain != null) { + response.setDomainId(domain.getUuid()); + response.setDomainName(domain.getName()); + } + } + + if (bgpPeer.getAccountId() != null) { + Account account = ApiDBUtils.findAccountById(bgpPeer.getAccountId()); + if (account != null) { + if (account.getType() == Account.Type.PROJECT) { + // find the project + Project project = ApiDBUtils.findProjectByProjectAccountId(account.getId()); + response.setProjectId(project.getUuid()); + response.setProjectName(project.getName()); + } else { + response.setAccountName(account.getAccountName()); + } + } + } + + Map detailsMap = bgpPeerDetailsDao.getBgpPeerDetails(bgpPeer.getId()); + if (MapUtils.isNotEmpty(detailsMap)) { + response.setDetails(detailsMap); + } + + response.setObjectName("bgppeer"); + return response; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_BGP_PEER_DELETE, + eventDescription = "Deleting BGP Peer", + async = true) + public boolean deleteBgpPeer(DeleteBgpPeerCmd deleteBgpPeerCmd) { + // check if BGP peer is in use + Long bgpPeerId = deleteBgpPeerCmd.getId(); + List usedBgpPeers = bgpPeerNetworkMapDao.listByBgpPeerId(bgpPeerId); + if (CollectionUtils.isNotEmpty(usedBgpPeers)) { + throw new InvalidParameterValueException(String.format("The BGP peer is being used by %s 
guest networks.", usedBgpPeers.size())); + } + + bgpPeerDao.remove(bgpPeerId); + return true; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_BGP_PEER_UPDATE, + eventDescription = "Updating a BGP Peer", + async = true) + public BgpPeer updateBgpPeer(UpdateBgpPeerCmd updateBgpPeerCmd) { + Long bgpPeerId = updateBgpPeerCmd.getId(); + Long newAsNumber = updateBgpPeerCmd.getAsNumber(); + String newIp4Address = updateBgpPeerCmd.getIp4Address(); + String newIp6Address = updateBgpPeerCmd.getIp6Address(); + String password = updateBgpPeerCmd.getPassword(); + Map detailsStr = updateBgpPeerCmd.getDetails(); + + BgpPeerVO bgpPeerVO = bgpPeerDao.findById(bgpPeerId); + if (bgpPeerVO == null) { + throw new InvalidParameterValueException(String.format("Invalid BGP peer ID: %s", bgpPeerId)); + } + + Long zoneId = bgpPeerVO.getDataCenterId(); + + boolean isAsNumberChanged = (newAsNumber != null) && !newAsNumber.equals(bgpPeerVO.getAsNumber()); + boolean isIp4AddressChanged = StringUtils.isNotBlank(newIp4Address) && !newIp4Address.equals(bgpPeerVO.getIp4Address()); + boolean isIp6AddressChanged = StringUtils.isNotBlank(newIp6Address) && !newIp6Address.equals(bgpPeerVO.getIp6Address()); + + if (newAsNumber == null) { + newAsNumber = bgpPeerVO.getAsNumber(); + } + if (newIp4Address == null) { + newIp4Address = bgpPeerVO.getIp4Address(); + } else if (StringUtils.isBlank(newIp4Address)) { + newIp4Address = null; + } else if (!NetUtils.isValidIp4(newIp4Address)) { + throw new InvalidParameterValueException("new IPv4 address is not valid."); + } + + if (newIp6Address == null) { + newIp6Address = bgpPeerVO.getIp6Address(); + } else if (StringUtils.isBlank(newIp6Address)) { + newIp6Address = null; + } else if (!NetUtils.isValidIp6(newIp6Address)) { + throw new InvalidParameterValueException("new IPv6 address is not valid."); + } + + if (ObjectUtils.allNull(newIp4Address, newIp6Address)) { + throw new InvalidParameterValueException("At least one of IPv4 and IPv6 address must be 
specified."); + } + + if ((isAsNumberChanged || isIp4AddressChanged) && newIp4Address != null) { + if (bgpPeerDao.findByZoneAndAsNumberAndAddress(zoneId, newAsNumber, newIp4Address, null) != null) { + throw new InvalidParameterValueException("There is already a BGP peer with same IPv4 address and AS number in the zone."); + } + } + if ((isAsNumberChanged || isIp6AddressChanged) && newIp6Address != null) { + if (bgpPeerDao.findByZoneAndAsNumberAndAddress(zoneId, newAsNumber, null, newIp6Address) != null) { + throw new InvalidParameterValueException("There is already a BGP peer with same IPv6 address and AS number in the zone."); + } + } + + final Map details = new HashMap<>(); + if (detailsStr != null) { + for (final String detailStr : detailsStr.keySet()) { + BgpPeer.Detail bgpPeerDetail = EnumUtils.getEnumIgnoreCase(BgpPeer.Detail.class, detailStr); + if (bgpPeerDetail == null) { + throw new InvalidParameterValueException("Unsupported BGP peer detail " + detailStr); + } + details.put(bgpPeerDetail, detailsStr.get(detailStr)); + } + } + + // update via bgpPeerDao + bgpPeerVO.setAsNumber(newAsNumber); + bgpPeerVO.setIp4Address(newIp4Address); + bgpPeerVO.setIp6Address(newIp6Address); + if (password != null) { + bgpPeerVO.setPassword(password); + } + bgpPeerDao.update(bgpPeerId, bgpPeerVO); + + boolean cleanupDetails = updateBgpPeerCmd.isCleanupDetails(); + if (cleanupDetails){ + bgpPeerDetailsDao.removeByBgpPeerId(bgpPeerId); + } else if (MapUtils.isNotEmpty(details)) { + bgpPeerDetailsDao.removeByBgpPeerId(bgpPeerId); + List bgpPeerDetails = new ArrayList<>(); + for (BgpPeer.Detail key : details.keySet()) { + BgpPeerDetailsVO detail = new BgpPeerDetailsVO(bgpPeerVO.getId(), key, details.get(key), true); + bgpPeerDetails.add(detail); + } + bgpPeerDetailsDao.saveDetails(bgpPeerDetails); + } + + return bgpPeerDao.findById(bgpPeerId); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_BGP_PEER_DEDICATE, + eventDescription = "Dedicating BGP Peer to a domain 
or an account", + async = true) + public BgpPeer dedicateBgpPeer(DedicateBgpPeerCmd dedicateBgpPeerCmd) { + final Long id = dedicateBgpPeerCmd.getId(); + Long domainId = dedicateBgpPeerCmd.getDomainId(); + final Long projectId = dedicateBgpPeerCmd.getProjectId(); + final String accountName = dedicateBgpPeerCmd.getAccountName(); + + BgpPeerVO bgpPeerVO = bgpPeerDao.findById(id); + if (bgpPeerVO == null) { + throw new InvalidParameterValueException(String.format("Cannot find BGP peer with id: ", id)); + } + Long accountId = null; + if (accountName != null || (projectId != null && projectId != -1L)) { + accountId = accountManager.finalyzeAccountId(accountName, domainId, projectId, false); + } + if (accountId != null) { + Account account = accountManager.getAccount(accountId); + domainId = account.getDomainId(); + } + + // Check if the BGP peer is used by other domain or account + if (domainId != null) { + List usedBgpPeers = bgpPeerNetworkMapDao.listUsedNetworksByOtherDomains(id, domainId); + if (CollectionUtils.isNotEmpty(usedBgpPeers)) { + throw new InvalidParameterValueException(String.format("The subnet is being used by %s guest networks of other domains.", usedBgpPeers.size())); + } + usedBgpPeers = bgpPeerNetworkMapDao.listUsedVpcsByOtherDomains(id, domainId); + if (CollectionUtils.isNotEmpty(usedBgpPeers)) { + throw new InvalidParameterValueException(String.format("The subnet is being used by %s vpcs of other domains.", usedBgpPeers.size())); + } + } + if (accountId != null) { + List usedBgpPeers = bgpPeerNetworkMapDao.listUsedNetworksByOtherAccounts(id, accountId); + if (CollectionUtils.isNotEmpty(usedBgpPeers)) { + throw new InvalidParameterValueException(String.format("The subnet is being used by %s guest networks of other accounts.", usedBgpPeers.size())); + } + usedBgpPeers = bgpPeerNetworkMapDao.listUsedVpcsByOtherAccounts(id, accountId); + if (CollectionUtils.isNotEmpty(usedBgpPeers)) { + throw new InvalidParameterValueException(String.format("The subnet 
is being used by %s guest networks of other accounts.", usedBgpPeers.size())); + } + } + + // update domain_id or account_id via dataCenterIpv4GuestSubnetDao to Mark the subnet as dedicated + bgpPeerVO.setDomainId(domainId); + bgpPeerVO.setAccountId(accountId); + bgpPeerDao.update(id, bgpPeerVO); + return bgpPeerDao.findById(id); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_BGP_PEER_RELEASE, + eventDescription = "Releasing a dedicated BGP Peer from a domain or an account", + async = true) + public BgpPeer releaseDedicatedBgpPeer(ReleaseDedicatedBgpPeerCmd releaseDedicatedBgpPeerCmd) { + final Long id = releaseDedicatedBgpPeerCmd.getId(); + BgpPeerVO bgpPeerVO = bgpPeerDao.findById(id); + if (bgpPeerVO == null) { + throw new InvalidParameterValueException(String.format("Cannot find BGP peer with id: ", id)); + } + + // update domain_id and account_id to null via bgpPeerDao, to release the dedication + bgpPeerVO.setDomainId(null); + bgpPeerVO.setAccountId(null); + bgpPeerDao.update(id, bgpPeerVO); + return bgpPeerDao.findById(id); + } + + @Override + public List listBgpPeers(ListBgpPeersCmd listBgpPeersCmd) { + Long id = listBgpPeersCmd.getId(); + Long zoneId = listBgpPeersCmd.getZoneId(); + Long asNumber = listBgpPeersCmd.getAsNumber(); + Long domainId = listBgpPeersCmd.getDomainId(); + Long projectId = listBgpPeersCmd.getProjectId(); + String accountName = listBgpPeersCmd.getAccountName(); + Boolean isDedicated = listBgpPeersCmd.getDedicated(); + String keyword = listBgpPeersCmd.getKeyword(); + + Long accountId = null; + if (accountName != null || (projectId != null && projectId != -1L)) { + accountId = accountManager.finalyzeAccountId(accountName, domainId, projectId, false); + } + if (isDedicated != null) { + SearchCriteria sc1 = createSearchCriteriaForListBgpPeersCmd(id, zoneId, asNumber, keyword); + if (Boolean.TRUE.equals(isDedicated)) { + sc1.addAnd("domainId", SearchCriteria.Op.NNULL); + } else { + sc1.addAnd("domainId", 
SearchCriteria.Op.NULL); + } + if (domainId != null) { + sc1.addAnd("domainId", SearchCriteria.Op.EQ, domainId); + } + if (accountId != null) { + sc1.addAnd("accountId", SearchCriteria.Op.EQ, accountId); + } + // search via bgpPeerDao + return bgpPeerDao.search(sc1, null); + } else if (accountId != null) { + if (zoneId == null) { + throw new InvalidParameterValueException("zoneId is required when list BGP peers for an account."); + } + Account account = accountManager.getAccount(accountId); + List bgpPeerIds = getBgpPeerIdsForAccount(account, zoneId); + if (CollectionUtils.isEmpty(bgpPeerIds)) { + return new ArrayList<>(); + } + SearchCriteria sc2 = createSearchCriteriaForListBgpPeersCmd(id, zoneId, asNumber, keyword); + sc2.addAnd("id", SearchCriteria.Op.IN, bgpPeerIds.toArray()); + return bgpPeerDao.search(sc2, null); + } else { + SearchCriteria sc3 = createSearchCriteriaForListBgpPeersCmd(id, zoneId, asNumber, keyword); + if (domainId != null) { + sc3.addAnd("domainId", SearchCriteria.Op.EQ, domainId); + } + return bgpPeerDao.search(sc3, null); + } + } + + private SearchCriteria createSearchCriteriaForListBgpPeersCmd(Long id, Long zoneId, Long asNumber, String keyword) { + SearchCriteria sc = bgpPeerDao.createSearchCriteria(); + if (id != null) { + sc.addAnd("id", SearchCriteria.Op.EQ, id); + } + if (zoneId != null) { + sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); + } + if (asNumber != null) { + sc.addAnd("asNumber", SearchCriteria.Op.EQ, asNumber); + } + if (StringUtils.isNotBlank(keyword)) { + SearchCriteria ssc = bgpPeerDao.createSearchCriteria(); + ssc.addOr("asNumber", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + ssc.addOr("ip4Address", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + ssc.addOr("ip6Address", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + sc.addAnd("ip4Address", SearchCriteria.Op.SC, ssc); + } + return sc; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_NETWORK_BGP_PEER_UPDATE, + eventDescription = "Changing BGP 
peers for network", async = true) + public Network changeBgpPeersForNetwork(ChangeBgpPeersForNetworkCmd changeBgpPeersForNetworkCmd) { + Long networkId = changeBgpPeersForNetworkCmd.getNetworkId(); + List bgpPeerIds = changeBgpPeersForNetworkCmd.getBgpPeerIds(); + + Network network = networkDao.findById(networkId); + if (network == null) { + throw new InvalidParameterValueException(String.format("Invalid network ID: %s", networkId)); + } + if (network.getVpcId() != null) { + throw new InvalidParameterValueException("The BGP peers of VPC tiers will inherit from the VPC, do not add separately."); + } + + Account owner = accountManager.getAccount(network.getAccountId()); + NetworkOffering networkOffering = networkOfferingDao.findById(network.getNetworkOfferingId()); + if (CollectionUtils.isNotEmpty(bgpPeerIds) && !isDynamicRoutedNetwork(networkOffering)) { + throw new InvalidParameterValueException("The network does not support Dynamic routing"); + } + validateBgpPeers(owner, network.getDataCenterId(), bgpPeerIds); + + return changeBgpPeersForNetworkInternal(network, bgpPeerIds); + } + + @Override + public Network removeBgpPeersFromNetwork(Network network) { + return changeBgpPeersForNetworkInternal(network, null); + } + + private Network changeBgpPeersForNetworkInternal(Network network, List bgpPeerIds) { + final List bgpPeerIdsToBeAdded; + if (CollectionUtils.isNotEmpty(bgpPeerIds)) { + bgpPeerIdsToBeAdded = new ArrayList<>(bgpPeerIds); + } else { + bgpPeerIdsToBeAdded = new ArrayList<>(); + } + List bgpPeerNetworkMapVOS = bgpPeerNetworkMapDao.listByNetworkId(network.getId()); + for (BgpPeerNetworkMapVO bgpPeerNetworkMapVO : bgpPeerNetworkMapVOS) { + Long bgpPeerId = bgpPeerNetworkMapVO.getBgpPeerId(); + if (bgpPeerIdsToBeAdded.contains(bgpPeerId)) { + bgpPeerIdsToBeAdded.remove(bgpPeerId); + } else { + bgpPeerNetworkMapVO.setState(BgpPeer.State.Revoke); + bgpPeerNetworkMapDao.update(bgpPeerNetworkMapVO.getId(), bgpPeerNetworkMapVO); + } + } + + for (Long bgpPeedId 
: bgpPeerIdsToBeAdded) { + bgpPeerNetworkMapDao.persist(new BgpPeerNetworkMapVO(bgpPeedId, network.getId(), null, BgpPeer.State.Add)); + } + + boolean result = true; + try { + result = bgpService.applyBgpPeers(network, false); + } catch (ResourceUnavailableException ex) { + logger.error("Unable to apply BGP peers due to : " + ex.getMessage()); + result = false; + } + if (result) { + logger.info("Succeed to apply BGP peers, updating state"); + bgpPeerNetworkMapVOS = bgpPeerNetworkMapDao.listByNetworkId(network.getId()); + for (BgpPeerNetworkMapVO bgpPeerNetworkMapVO : bgpPeerNetworkMapVOS) { + if (BgpPeer.State.Add.equals(bgpPeerNetworkMapVO.getState())) { + bgpPeerNetworkMapVO.setState(BgpPeer.State.Active); + bgpPeerNetworkMapDao.update(bgpPeerNetworkMapVO.getId(), bgpPeerNetworkMapVO); + } else if (BgpPeer.State.Revoke.equals(bgpPeerNetworkMapVO.getState())) { + bgpPeerNetworkMapDao.remove(bgpPeerNetworkMapVO.getId()); + } + } + } else { + logger.info("Failed to apply BGP peers, rolling back to original state"); + bgpPeerNetworkMapVOS = bgpPeerNetworkMapDao.listByNetworkId(network.getId()); + for (BgpPeerNetworkMapVO bgpPeerNetworkMapVO : bgpPeerNetworkMapVOS) { + if (BgpPeer.State.Add.equals(bgpPeerNetworkMapVO.getState())) { + bgpPeerNetworkMapDao.remove(bgpPeerNetworkMapVO.getId()); + } else if (BgpPeer.State.Revoke.equals(bgpPeerNetworkMapVO.getState())) { + bgpPeerNetworkMapVO.setState(BgpPeer.State.Add); + bgpPeerNetworkMapDao.update(bgpPeerNetworkMapVO.getId(), bgpPeerNetworkMapVO); + } + } + try { + bgpService.applyBgpPeers(network, false); + } catch (ResourceUnavailableException ex) { + logger.error("Unable to apply BGP peers after rollback due to : " + ex.getMessage()); + } + return null; + } + + return networkDao.findById(network.getId()); + } + + @Override + public void validateBgpPeers(Account owner, Long zoneId, List bgpPeerIds) { + if (CollectionUtils.isEmpty(bgpPeerIds)) { + return; + } + for (Long bgpPeerId : bgpPeerIds) { + BgpPeerVO bgpPeerVO = 
bgpPeerDao.findById(bgpPeerId); + if (bgpPeerVO == null) { + throw new InvalidParameterValueException(String.format("Invalid BGP peer ID: %s", bgpPeerId)); + } + if (bgpPeerVO.getDataCenterId() != zoneId) { + throw new InvalidParameterValueException(String.format("BGP peer (ID: %s) belongs to a different zone", bgpPeerVO.getUuid())); + } + if (bgpPeerVO.getDomainId() != null && !bgpPeerVO.getDomainId().equals(owner.getDomainId())) { + throw new InvalidParameterValueException(String.format("BGP peer (ID: %s) belongs to a different domain", bgpPeerVO.getUuid())); + } + if (bgpPeerVO.getAccountId() != null && !bgpPeerVO.getAccountId().equals(owner.getAccountId())) { + throw new InvalidParameterValueException(String.format("BGP peer (ID: %s) belongs to a different account", bgpPeerVO.getUuid())); + } + } + } + + @Override + public void persistBgpPeersForGuestNetwork(long networkId, List bgpPeerIds) { + bgpPeerNetworkMapDao.persistForNetwork(networkId, bgpPeerIds); + } + + @Override + public void releaseBgpPeersForGuestNetwork(long networkId) { + bgpPeerNetworkMapDao.removeByNetworkId(networkId); + } + + @Override + public void persistBgpPeersForVpc(long vpcId, List bgpPeerIds) { + bgpPeerNetworkMapDao.persistForVpc(vpcId, bgpPeerIds); + } + + @Override + public void releaseBgpPeersForVpc(long vpcId) { + bgpPeerNetworkMapDao.removeByVpcId(vpcId); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_VPC_BGP_PEER_UPDATE, + eventDescription = "Changing BGP peers for VPC", async = true) + public Vpc changeBgpPeersForVpc(ChangeBgpPeersForVpcCmd changeBgpPeersForVpcCmd) { + Long vpcId = changeBgpPeersForVpcCmd.getVpcId(); + List bgpPeerIds = changeBgpPeersForVpcCmd.getBgpPeerIds(); + + Vpc vpc = vpcDao.findById(vpcId); + if (vpc == null) { + throw new InvalidParameterValueException(String.format("Invalid VPC ID: %s", vpcId)); + } + + Account owner = accountManager.getAccount(vpc.getAccountId()); + VpcOffering vpcOffering = 
vpcOfferingDao.findById(vpc.getVpcOfferingId()); + if (CollectionUtils.isNotEmpty(bgpPeerIds) && !isDynamicRoutedVpc(vpcOffering)) { + throw new InvalidParameterValueException("The VPC does not support Dynamic routing"); + } + validateBgpPeers(owner, vpc.getZoneId(), bgpPeerIds); + + return changeBgpPeersForVpcInternal(vpc, bgpPeerIds); + } + + @Override + public List getBgpPeerIdsForAccount(Account owner, long zoneId) { + return bgpPeerDao.listAvailableBgpPeerIdsForAccount(zoneId, owner.getDomainId(), owner.getId(), UseSystemBgpPeers.valueIn(owner.getId())); + } + + private Vpc changeBgpPeersForVpcInternal(Vpc vpc, List bgpPeerIds) { + final List bgpPeerIdsToBeAdded; + if (CollectionUtils.isNotEmpty(bgpPeerIds)) { + bgpPeerIdsToBeAdded = new ArrayList<>(bgpPeerIds); + } else { + bgpPeerIdsToBeAdded = new ArrayList<>(); + } + List bgpPeerNetworkMapVOS = bgpPeerNetworkMapDao.listByVpcId(vpc.getId()); + for (BgpPeerNetworkMapVO bgpPeerNetworkMapVO : bgpPeerNetworkMapVOS) { + Long bgpPeerId = bgpPeerNetworkMapVO.getBgpPeerId(); + if (bgpPeerIdsToBeAdded.contains(bgpPeerId)) { + bgpPeerIdsToBeAdded.remove(bgpPeerId); + } else { + bgpPeerNetworkMapVO.setState(BgpPeer.State.Revoke); + bgpPeerNetworkMapDao.update(bgpPeerNetworkMapVO.getId(), bgpPeerNetworkMapVO); + } + } + + for (Long bgpPeedId : bgpPeerIdsToBeAdded) { + bgpPeerNetworkMapDao.persist(new BgpPeerNetworkMapVO(bgpPeedId, null, vpc.getId(), BgpPeer.State.Add)); + } + + boolean result = true; + try { + result = bgpService.applyBgpPeers(vpc, false); + } catch (ResourceUnavailableException ex) { + logger.error("Unable to apply BGP peers due to : " + ex.getMessage()); + result = false; + } + if (result) { + logger.info("Succeed to apply BGP peers, updating state"); + bgpPeerNetworkMapVOS = bgpPeerNetworkMapDao.listByVpcId(vpc.getId()); + for (BgpPeerNetworkMapVO bgpPeerNetworkMapVO : bgpPeerNetworkMapVOS) { + if (BgpPeer.State.Add.equals(bgpPeerNetworkMapVO.getState())) { + 
bgpPeerNetworkMapVO.setState(BgpPeer.State.Active); + bgpPeerNetworkMapDao.update(bgpPeerNetworkMapVO.getId(), bgpPeerNetworkMapVO); + } else if (BgpPeer.State.Revoke.equals(bgpPeerNetworkMapVO.getState())) { + bgpPeerNetworkMapDao.remove(bgpPeerNetworkMapVO.getId()); + } + } + } else { + logger.info("Failed to apply BGP peers, rolling back to original state"); + bgpPeerNetworkMapVOS = bgpPeerNetworkMapDao.listByVpcId(vpc.getId()); + for (BgpPeerNetworkMapVO bgpPeerNetworkMapVO : bgpPeerNetworkMapVOS) { + if (BgpPeer.State.Add.equals(bgpPeerNetworkMapVO.getState())) { + bgpPeerNetworkMapDao.remove(bgpPeerNetworkMapVO.getId()); + } else if (BgpPeer.State.Revoke.equals(bgpPeerNetworkMapVO.getState())) { + bgpPeerNetworkMapVO.setState(BgpPeer.State.Add); + bgpPeerNetworkMapDao.update(bgpPeerNetworkMapVO.getId(), bgpPeerNetworkMapVO); + } + } + try { + bgpService.applyBgpPeers(vpc, false); + } catch (ResourceUnavailableException ex) { + logger.error("Unable to apply BGP peers after rollback due to : " + ex.getMessage()); + } + return null; + } + + return vpcDao.findById(vpc.getId()); + } + + @Override + public void removeIpv4SubnetsForZoneByAccountId(long accountId) { + List existingSubnets = dataCenterIpv4GuestSubnetDao.listByAccountId(accountId); + for (DataCenterIpv4GuestSubnetVO subnet : existingSubnets) { + ipv4GuestSubnetNetworkMapDao.deleteByParentId(subnet.getId()); + dataCenterIpv4GuestSubnetDao.remove(subnet.getId()); + } + } + + @Override + public void removeIpv4SubnetsForZoneByDomainId(long domainId) { + List existingSubnets = dataCenterIpv4GuestSubnetDao.listByDomainId(domainId); + for (DataCenterIpv4GuestSubnetVO subnet : existingSubnets) { + ipv4GuestSubnetNetworkMapDao.deleteByParentId(subnet.getId()); + dataCenterIpv4GuestSubnetDao.remove(subnet.getId()); + } + } + + @Override + public void removeBgpPeersByAccountId(long accountId) { + bgpPeerDao.removeByAccountId(accountId); + } + + @Override + public void removeBgpPeersByDomainId(long domainId) { + 
bgpPeerDao.removeByDomainId(domainId); + } +} diff --git a/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java b/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java index 2dea5a4223f..817cfe07e58 100644 --- a/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java +++ b/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java @@ -353,7 +353,7 @@ public class RouterDeploymentDefinition { setupAccountOwner(); // Check if public network has to be set on VR - isPublicNetwork = networkModel.isProviderSupportServiceInNetwork(guestNetwork.getId(), Service.SourceNat, Provider.VirtualRouter); + isPublicNetwork = networkModel.isAnyServiceSupportedInNetwork(guestNetwork.getId(), Provider.VirtualRouter, Service.SourceNat, Service.Gateway); boolean canProceed = true; if (isRedundant() && !isPublicNetwork) { diff --git a/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java b/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java index aa44f29efcd..405575c65b1 100644 --- a/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java +++ b/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java @@ -22,7 +22,6 @@ import java.util.Map; import java.util.Objects; import com.cloud.dc.DataCenter; -import com.cloud.dc.Vlan; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.element.NsxProviderVO; @@ -132,10 +131,9 @@ public class VpcRouterDeploymentDefinition extends RouterDeploymentDefinition { if (isPublicNetwork) { if (Objects.isNull(nsxProvider)) { - sourceNatIp = vpcMgr.assignSourceNatIpAddressToVpc(owner, vpc); + sourceNatIp = vpcMgr.assignSourceNatIpAddressToVpc(owner, vpc, null); } else { - // 
NSX deploys VRs with Public NIC != to the source NAT, the source NAT IP is on the NSX Public range - sourceNatIp = ipAddrMgr.assignPublicIpAddress(zoneId, getPodId(), owner, Vlan.VlanType.VirtualNetwork, null, null, false, true); + sourceNatIp = vpcMgr.assignSourceNatIpAddressToVpc(owner, vpc, getPodId()); if (vpc != null) { IPAddressVO routerPublicIp = ipAddressDao.findByIp(sourceNatIp.getAddress().toString()); routerPublicIp.setVpcId(vpc.getId()); diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java index e777e959b84..5c1fc5e9ac6 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java @@ -29,6 +29,7 @@ import com.cloud.network.RemoteAccessVpn; import com.cloud.network.VpnUser; import com.cloud.network.router.VirtualRouter; import com.cloud.network.rules.AdvancedVpnRules; +import com.cloud.network.rules.BgpPeersRules; import com.cloud.network.rules.DhcpEntryRules; import com.cloud.network.rules.DhcpPvlanRules; import com.cloud.network.rules.NetworkAclsRules; @@ -47,6 +48,8 @@ import com.cloud.vm.NicProfile; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineProfile; +import org.apache.cloudstack.network.BgpPeer; +import org.apache.commons.collections.CollectionUtils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.stereotype.Component; @@ -243,4 +246,27 @@ public class AdvancedNetworkTopology extends BasicNetworkTopology { final boolean result = applyRules(network, router, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper(aclsRules)); return result; } + + @Override + public boolean applyBgpPeers(Network network, List bpgPeers, 
VirtualRouter router) throws ResourceUnavailableException { + logger.debug("APPLYING BGP Peers"); + + if (CollectionUtils.isEmpty(bpgPeers)) { + logger.debug("No bgp peers to apply. However, apply BGP peers to clear the existing configuration in the VRs."); + } + + final BgpPeersRules bgpPeersRules = new BgpPeersRules(bpgPeers, network); + + boolean result = true; + if (router.getState() == State.Running) { + result = bgpPeersRules.accept(_advancedVisitor, router); + } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { + logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending BgpPeer command to the backend"); + } else { + logger.warn("Unable to apply BgpPeer, virtual router is not in the right state " + router.getState()); + throw new ResourceUnavailableException("Unable to apply BgpPeer on the backend," + " virtual router is not in the right state", DataCenter.class, + router.getDataCenterId()); + } + return result; + } } diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkVisitor.java b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkVisitor.java index 4db46ac36bb..bc2271b0163 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkVisitor.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkVisitor.java @@ -21,6 +21,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import com.cloud.network.rules.BgpPeersRules; +import org.apache.cloudstack.network.BgpPeer; import org.springframework.stereotype.Component; import com.cloud.agent.api.Command; @@ -211,4 +213,20 @@ public class AdvancedNetworkVisitor extends BasicNetworkVisitor { // results accordingly return _networkGeneralHelper.sendCommandsToRouter(router, cmds); } + + @Override + public boolean visit(final BgpPeersRules bgpPeersRules) throws ResourceUnavailableException { + final 
VirtualRouter router = bgpPeersRules.getRouter(); + final List bgpPeers = bgpPeersRules.getBgpPeers(); + final Network network = bgpPeersRules.getNetwork(); + + final Commands cmds = new Commands(Command.OnError.Continue); + + _commandSetupHelper.createBgpPeersCommands(bgpPeers, router, cmds, network); + if (cmds.size() == 0) { + return true; + } + + return _networkGeneralHelper.sendCommandsToRouter(router, cmds); + } } diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java index 77519c500b2..65d702b7138 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkTopology.java @@ -22,6 +22,7 @@ import java.util.List; import javax.inject.Inject; +import org.apache.cloudstack.network.BgpPeer; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.annotation.Autowired; @@ -463,4 +464,9 @@ public class BasicNetworkTopology implements NetworkTopology { return applyRules(network, virtualRouter, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper(dhcpRules)); } + + @Override + public boolean applyBgpPeers(Network network, List bpgPeers, VirtualRouter virtualRouter) throws ResourceUnavailableException { + throw new CloudRuntimeException("applyBgpPeers not implemented in Basic Network Topology."); + } } diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java index 78f281f32cf..8702a58ad69 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java @@ -22,6 +22,7 @@ import 
java.util.List; import javax.inject.Inject; +import com.cloud.network.rules.BgpPeersRules; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.stereotype.Component; @@ -323,4 +324,9 @@ public class BasicNetworkVisitor extends NetworkTopologyVisitor { public boolean visit(final AdvancedVpnRules vpnRules) throws ResourceUnavailableException { throw new CloudRuntimeException("AdvancedVpnRules not implemented in Basic Network Topology."); } + + @Override + public boolean visit(final BgpPeersRules bgpPeersRules) throws ResourceUnavailableException { + throw new CloudRuntimeException("BgpPeersRules not implemented in Basic Network Topology."); + } } diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopology.java index aff40ce69ac..176584780fe 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopology.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopology.java @@ -38,6 +38,7 @@ import com.cloud.network.vpc.StaticRouteProfile; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicProfile; import com.cloud.vm.VirtualMachineProfile; +import org.apache.cloudstack.network.BgpPeer; public interface NetworkTopology { @@ -89,4 +90,6 @@ public interface NetworkTopology { final boolean failWhenDisconnect, RuleApplierWrapper ruleApplier) throws ResourceUnavailableException; boolean removeDhcpEntry(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final VirtualRouter virtualRouter) throws ResourceUnavailableException; + + boolean applyBgpPeers(final Network network, final List bpgPeers, final VirtualRouter virtualRouter) throws ResourceUnavailableException; } diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopologyVisitor.java 
b/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopologyVisitor.java index 035c67457e5..07bbcc19160 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopologyVisitor.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopologyVisitor.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.network.topology; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.rules.AdvancedVpnRules; import com.cloud.network.rules.BasicVpnRules; +import com.cloud.network.rules.BgpPeersRules; import com.cloud.network.rules.DhcpEntryRules; import com.cloud.network.rules.DhcpPvlanRules; import com.cloud.network.rules.DhcpSubNetRules; @@ -64,4 +65,5 @@ public abstract class NetworkTopologyVisitor { public abstract boolean visit(DhcpSubNetRules dhcpRules) throws ResourceUnavailableException; public abstract boolean visit(NicPlugInOutRules nicPlugInOutRules) throws ResourceUnavailableException; public abstract boolean visit(StaticRoutesRules staticRoutesRules) throws ResourceUnavailableException; + public abstract boolean visit(BgpPeersRules bgpPeersRules) throws ResourceUnavailableException; } diff --git a/server/src/main/java/org/apache/cloudstack/resource/ResourceCleanupServiceImpl.java b/server/src/main/java/org/apache/cloudstack/resource/ResourceCleanupServiceImpl.java new file mode 100644 index 00000000000..50c4de36b7f --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/resource/ResourceCleanupServiceImpl.java @@ -0,0 +1,829 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.resource; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.command.admin.resource.PurgeExpungedResourcesCmd; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.jobs.dao.VmWorkJobDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; +import 
org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.EnumUtils; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; + +import com.cloud.cluster.ManagementServerHostVO; +import com.cloud.cluster.dao.ManagementServerHostDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.ha.HighAvailabilityManager; +import com.cloud.network.as.dao.AutoScaleVmGroupVmMapDao; +import com.cloud.network.dao.IPAddressDao; +import com.cloud.network.dao.InlineLoadBalancerNicMapDao; +import com.cloud.network.dao.LoadBalancerVMMapDao; +import com.cloud.network.dao.OpRouterMonitorServiceDao; +import com.cloud.network.rules.dao.PortForwardingRulesDao; +import com.cloud.offering.ServiceOffering; +import com.cloud.secstorage.CommandExecLogDao; +import com.cloud.service.dao.ServiceOfferingDetailsDao; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotDetailsDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.component.PluggableService; +import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.ItWorkDao; +import com.cloud.vm.NicVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.ConsoleSessionDao; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicDetailsDao; +import com.cloud.vm.dao.NicExtraDhcpOptionDao; 
+import com.cloud.vm.dao.NicSecondaryIpDao; +import com.cloud.vm.dao.UserVmDetailsDao; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; + +public class ResourceCleanupServiceImpl extends ManagerBase implements ResourceCleanupService, PluggableService, + Configurable { + + @Inject + VMInstanceDao vmInstanceDao; + @Inject + VolumeDao volumeDao; + @Inject + VolumeDetailsDao volumeDetailsDao; + @Inject + VolumeDataStoreDao volumeDataStoreDao; + @Inject + SnapshotDao snapshotDao; + @Inject + SnapshotDetailsDao snapshotDetailsDao; + @Inject + SnapshotDataStoreDao snapshotDataStoreDao; + @Inject + NicDao nicDao; + @Inject + NicDetailsDao nicDetailsDao; + @Inject + NicExtraDhcpOptionDao nicExtraDhcpOptionDao; + @Inject + InlineLoadBalancerNicMapDao inlineLoadBalancerNicMapDao; + @Inject + UserVmDetailsDao userVmDetailsDao; + @Inject + VMSnapshotDao vmSnapshotDao; + @Inject + VMSnapshotDetailsDao vmSnapshotDetailsDao; + @Inject + AutoScaleVmGroupVmMapDao autoScaleVmGroupVmMapDao; + @Inject + CommandExecLogDao commandExecLogDao; + @Inject + NetworkOrchestrationService networkOrchestrationService; + @Inject + LoadBalancerVMMapDao loadBalancerVMMapDao; + @Inject + NicSecondaryIpDao nicSecondaryIpDao; + @Inject + HighAvailabilityManager highAvailabilityManager; + @Inject + ItWorkDao itWorkDao; + @Inject + OpRouterMonitorServiceDao opRouterMonitorServiceDao; + @Inject + PortForwardingRulesDao portForwardingRulesDao; + @Inject + IPAddressDao ipAddressDao; + @Inject + VmWorkJobDao vmWorkJobDao; + @Inject + ConsoleSessionDao consoleSessionDao; + @Inject + ManagementServerHostDao managementServerHostDao; + @Inject + ServiceOfferingDetailsDao serviceOfferingDetailsDao; + + private ScheduledExecutorService expungedResourcesCleanupExecutor; + private ExecutorService purgeExpungedResourcesJobExecutor; + + protected void 
purgeLinkedSnapshotEntities(final List snapshotIds, final Long batchSize) { + if (CollectionUtils.isEmpty(snapshotIds)) { + return; + } + snapshotDetailsDao.batchExpungeForResources(snapshotIds, batchSize); + snapshotDataStoreDao.expungeBySnapshotList(snapshotIds, batchSize); + // Snapshot policies are using ON DELETE CASCADE + } + + protected long purgeVolumeSnapshots(final List volumeIds, final Long batchSize) { + if (CollectionUtils.isEmpty(volumeIds)) { + return 0; + } + SearchBuilder sb = snapshotDao.createSearchBuilder(); + sb.and("volumeIds", sb.entity().getVolumeId(), SearchCriteria.Op.IN); + sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.NNULL); + SearchCriteria sc = sb.create(); + sc.setParameters("volumeIds", volumeIds.toArray()); + int removed = 0; + long totalRemoved = 0; + Filter filter = new Filter(SnapshotVO.class, "id", true, 0L, batchSize); + final long batchSizeFinal = ObjectUtils.defaultIfNull(batchSize, 0L); + do { + List snapshots = snapshotDao.searchIncludingRemoved(sc, filter, null, false); + List snapshotIds = snapshots.stream().map(SnapshotVO::getId).collect(Collectors.toList()); + purgeLinkedSnapshotEntities(snapshotIds, batchSize); + removed = snapshotDao.expungeList(snapshotIds); + totalRemoved += removed; + } while (batchSizeFinal > 0 && removed >= batchSizeFinal); + return totalRemoved; + } + + protected void purgeLinkedVolumeEntities(final List volumeIds, final Long batchSize) { + if (CollectionUtils.isEmpty(volumeIds)) { + return; + } + volumeDetailsDao.batchExpungeForResources(volumeIds, batchSize); + volumeDataStoreDao.expungeByVolumeList(volumeIds, batchSize); + purgeVolumeSnapshots(volumeIds, batchSize); + } + + protected long purgeVMVolumes(final List vmIds, final Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + int removed = 0; + long totalRemoved = 0; + final long batchSizeFinal = ObjectUtils.defaultIfNull(batchSize, 0L); + do { + List volumes = volumeDao.searchRemovedByVms(vmIds, 
batchSize); + List volumeIds = volumes.stream().map(VolumeVO::getId).collect(Collectors.toList()); + purgeLinkedVolumeEntities(volumeIds, batchSize); + removed = volumeDao.expungeList(volumeIds); + totalRemoved += removed; + } while (batchSizeFinal > 0 && removed >= batchSizeFinal); + return totalRemoved; + } + + protected void purgeLinkedNicEntities(final List nicIds, final Long batchSize) { + if (CollectionUtils.isEmpty(nicIds)) { + return; + } + nicDetailsDao.batchExpungeForResources(nicIds, batchSize); + nicExtraDhcpOptionDao.expungeByNicList(nicIds, batchSize); + inlineLoadBalancerNicMapDao.expungeByNicList(nicIds, batchSize); + } + + protected long purgeVMNics(final List vmIds, final Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + int removed = 0; + long totalRemoved = 0; + final long batchSizeFinal = ObjectUtils.defaultIfNull(batchSize, 0L); + do { + List nics = nicDao.searchRemovedByVms(vmIds, batchSize); + List nicIds = nics.stream().map(NicVO::getId).collect(Collectors.toList()); + purgeLinkedNicEntities(nicIds, batchSize); + removed = nicDao.expungeList(nicIds); + totalRemoved += removed; + } while (batchSizeFinal > 0 && removed >= batchSizeFinal); + return totalRemoved; + } + + protected long purgeVMSnapshots(final List vmIds, final Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { + return 0; + } + int removed = 0; + long totalRemoved = 0; + final long batchSizeFinal = ObjectUtils.defaultIfNull(batchSize, 0L); + do { + List vmSnapshots = vmSnapshotDao.searchRemovedByVms(vmIds, batchSize); + List ids = vmSnapshots.stream().map(VMSnapshotVO::getId).collect(Collectors.toList()); + vmSnapshotDetailsDao.batchExpungeForResources(ids, batchSize); + removed = vmSnapshotDao.expungeList(ids); + totalRemoved += removed; + } while (batchSizeFinal > 0 && removed >= batchSizeFinal); + return totalRemoved; + } + + protected void purgeLinkedVMEntities(final List vmIds, final Long batchSize) { + if (CollectionUtils.isEmpty(vmIds)) { 
+ return; + } + purgeVMVolumes(vmIds, batchSize); + purgeVMNics(vmIds, batchSize); + userVmDetailsDao.batchExpungeForResources(vmIds, batchSize); + purgeVMSnapshots(vmIds, batchSize); + autoScaleVmGroupVmMapDao.expungeByVmList(vmIds, batchSize); + commandExecLogDao.expungeByVmList(vmIds, batchSize); + networkOrchestrationService.expungeLbVmRefs(vmIds, batchSize); + loadBalancerVMMapDao.expungeByVmList(vmIds, batchSize); + nicSecondaryIpDao.expungeByVmList(vmIds, batchSize); + highAvailabilityManager.expungeWorkItemsByVmList(vmIds, batchSize); + itWorkDao.expungeByVmList(vmIds, batchSize); + opRouterMonitorServiceDao.expungeByVmList(vmIds, batchSize); + portForwardingRulesDao.expungeByVmList(vmIds, batchSize); + ipAddressDao.expungeByVmList(vmIds, batchSize); + vmWorkJobDao.expungeByVmList(vmIds, batchSize); + consoleSessionDao.expungeByVmList(vmIds, batchSize); + } + + protected HashSet getVmIdsWithActiveVolumeSnapshots(List vmIds) { + if (CollectionUtils.isEmpty(vmIds)) { + return new HashSet<>(); + } + List volumes = volumeDao.searchRemovedByVms(vmIds, null); + List volumeIds = volumes.stream().map(VolumeVO::getId).collect(Collectors.toList()); + List activeSnapshots = snapshotDao.searchByVolumes(volumeIds); + HashSet activeSnapshotVolumeIds = + activeSnapshots.stream().map(SnapshotVO::getVolumeId).collect(Collectors.toCollection(HashSet::new)); + List volumesWithActiveSnapshots = + volumes.stream().filter(v -> activeSnapshotVolumeIds.contains(v.getId())).collect(Collectors.toList()); + return volumesWithActiveSnapshots.stream().map(VolumeVO::getInstanceId) + .collect(Collectors.toCollection(HashSet::new)); + } + + protected Pair, List> getFilteredVmIdsForSnapshots(List vmIds) { + HashSet currentSkippedVmIds = new HashSet<>(); + List activeSnapshots = vmSnapshotDao.searchByVms(vmIds); + if (CollectionUtils.isNotEmpty(activeSnapshots)) { + HashSet vmIdsWithActiveSnapshots = activeSnapshots.stream().map(VMSnapshotVO::getVmId) + 
.collect(Collectors.toCollection(HashSet::new)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Skipping purging VMs with IDs %s as they have active " + + "VM snapshots", StringUtils.join(vmIdsWithActiveSnapshots))); + } + currentSkippedVmIds.addAll(vmIdsWithActiveSnapshots); + } + HashSet vmIdsWithActiveVolumeSnapshots = getVmIdsWithActiveVolumeSnapshots(vmIds); + if (CollectionUtils.isNotEmpty(vmIdsWithActiveVolumeSnapshots)) { + if (logger.isDebugEnabled()) { + logger.debug(String.format("Skipping purging VMs with IDs %s as they have volumes with active " + + "snapshots", StringUtils.join(vmIdsWithActiveVolumeSnapshots))); + } + currentSkippedVmIds.addAll(vmIdsWithActiveVolumeSnapshots); + } + if (CollectionUtils.isNotEmpty(currentSkippedVmIds)) { + vmIds.removeAll(currentSkippedVmIds); + } + return new Pair<>(vmIds, new ArrayList<>(currentSkippedVmIds)); + } + + protected Pair, List> getVmIdsWithNoActiveSnapshots(final Date startDate, final Date endDate, + final Long batchSize, final List skippedVmIds) { + List vms = vmInstanceDao.searchRemovedByRemoveDate(startDate, endDate, batchSize, skippedVmIds); + if (CollectionUtils.isEmpty(vms)) { + return new Pair<>(new ArrayList<>(), new ArrayList<>()); + } + List vmIds = vms.stream().map(VMInstanceVO::getId).collect(Collectors.toList()); + return getFilteredVmIdsForSnapshots(vmIds); + } + + protected long purgeVMEntities(final Long batchSize, final Date startDate, final Date endDate) { + return Transaction.execute((TransactionCallbackWithException) status -> { + int count; + long totalRemoved = 0; + final long batchSizeFinal = ObjectUtils.defaultIfNull(batchSize, 0L); + List skippedVmIds = new ArrayList<>(); + do { + Pair, List> allVmIds = + getVmIdsWithNoActiveSnapshots(startDate, endDate, batchSize, skippedVmIds); + List vmIds = allVmIds.first(); + List currentSkippedVmIds = allVmIds.second(); + count = vmIds.size() + currentSkippedVmIds.size(); + skippedVmIds.addAll(currentSkippedVmIds); + 
purgeLinkedVMEntities(vmIds, batchSize); + totalRemoved += vmInstanceDao.expungeList(vmIds); + } while (batchSizeFinal > 0 && count >= batchSizeFinal); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Purged total %d VM records", totalRemoved)); + } + return totalRemoved; + }); + } + + protected boolean purgeVMEntity(final long vmId) { + return Transaction.execute((TransactionCallbackWithException) status -> { + final Long batchSize = ExpungedResourcesPurgeBatchSize.value().longValue(); + List vmIds = new ArrayList<>(); + vmIds.add(vmId); + Pair, List> allVmIds = getFilteredVmIdsForSnapshots(vmIds); + if (CollectionUtils.isEmpty(allVmIds.first())) { + return false; + } + purgeLinkedVMEntities(vmIds, batchSize); + return vmInstanceDao.expunge(vmId); + }); + } + + protected long purgeEntities(final List resourceTypes, final Long batchSize, + final Date startDate, final Date endDate) { + if (logger.isTraceEnabled()) { + logger.trace(String.format("Expunging entities with parameters - resourceType: %s, batchSize: %d, " + + "startDate: %s, endDate: %s", StringUtils.join(resourceTypes), batchSize, startDate, endDate)); + } + long totalPurged = 0; + if (CollectionUtils.isEmpty(resourceTypes) || resourceTypes.contains(ResourceType.VirtualMachine)) { + totalPurged += purgeVMEntities(batchSize, startDate, endDate); + } + return totalPurged; + } + + protected Void purgeExpungedResourcesCallback( + AsyncCallbackDispatcher callback, + PurgeExpungedResourcesContext context) { + PurgeExpungedResourcesResult result = callback.getResult(); + context.future.complete(result); + return null; + } + + protected ResourceType getResourceTypeAndValidatePurgeExpungedResourcesCmdParams(final String resourceTypeStr, + final Date startDate, final Date endDate, final Long batchSize) { + ResourceType resourceType = null; + if (StringUtils.isNotBlank(resourceTypeStr)) { + resourceType = EnumUtils.getEnumIgnoreCase(ResourceType.class, resourceTypeStr, null); + if (resourceType == 
null) { + throw new InvalidParameterValueException("Invalid resource type specified"); + } + } + if (batchSize != null && batchSize <= 0) { + throw new InvalidParameterValueException(String.format("Invalid %s specified", ApiConstants.BATCH_SIZE)); + } + if (endDate != null && startDate != null && endDate.before(startDate)) { + throw new InvalidParameterValueException(String.format("Invalid %s specified", ApiConstants.END_DATE)); + } + return resourceType; + } + + protected long purgeExpungedResourceUsingJob(final ResourceType resourceType, final Long batchSize, + final Date startDate, final Date endDate) { + AsyncCallFuture future = new AsyncCallFuture<>(); + PurgeExpungedResourcesContext context = + new PurgeExpungedResourcesContext<>(null, future); + AsyncCallbackDispatcher caller = + AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().purgeExpungedResourcesCallback(null, null)) + .setContext(context); + PurgeExpungedResourceThread job = new PurgeExpungedResourceThread(resourceType, batchSize, startDate, endDate, + caller); + purgeExpungedResourcesJobExecutor.submit(job); + long expungedCount; + try { + PurgeExpungedResourcesResult result = future.get(); + if (result.isFailed()) { + throw new CloudRuntimeException(String.format("Failed to purge expunged resources due to: %s", result.getResult())); + } + expungedCount = result.getPurgedCount(); + } catch (InterruptedException | ExecutionException e) { + logger.error(String.format("Failed to purge expunged resources due to: %s", e.getMessage()), e); + throw new CloudRuntimeException("Failed to purge expunged resources"); + } + return expungedCount; + } + + protected boolean isVmOfferingPurgeResourcesEnabled(long vmServiceOfferingId) { + String detail = + serviceOfferingDetailsDao.getDetail(vmServiceOfferingId, ServiceOffering.PURGE_DB_ENTITIES_KEY); + return StringUtils.isNotBlank(detail) && Boolean.parseBoolean(detail); + } + + protected boolean purgeExpungedResource(long resourceId, 
ResourceType resourceType) { + if (!ResourceType.VirtualMachine.equals(resourceType)) { + return false; + } + return purgeVMEntity(resourceId); + } + + protected void purgeExpungedResourceLater(long resourceId, ResourceType resourceType) { + AsyncCallFuture future = new AsyncCallFuture<>(); + PurgeExpungedResourcesContext context = + new PurgeExpungedResourcesContext<>(null, future); + AsyncCallbackDispatcher caller = + AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().purgeExpungedResourcesCallback(null, null)) + .setContext(context); + PurgeExpungedResourceThread job = new PurgeExpungedResourceThread(resourceId, resourceType, caller); + purgeExpungedResourcesJobExecutor.submit(job); + } + + protected Date parseDateFromConfig(String configKey, String configValue) { + if (StringUtils.isBlank(configValue)) { + return null; + } + final List dateFormats = List.of("yyyy-MM-dd HH:mm:ss", "yyyy-MM-dd"); + Date date = null; + for (String format : dateFormats) { + final SimpleDateFormat dateFormat = new SimpleDateFormat(format); + try { + date = dateFormat.parse(configValue); + break; + } catch (ParseException e) { + logger.trace(String.format("Unable to parse value for config %s: %s with date " + + "format: %s due to %s", configKey, configValue, format, e.getMessage())); + } + } + if (date == null) { + throw new CloudRuntimeException(String.format("Unable to parse value for config %s: %s with date " + + "formats: %s", configKey, configValue, StringUtils.join(dateFormats))); + } + return date; + } + + protected Date getStartDateFromConfig() { + return parseDateFromConfig(ExpungedResourcesPurgeStartTime.key(), ExpungedResourcesPurgeStartTime.value()); + } + + protected Date calculatePastDateFromConfig(String configKey, Integer configValue) { + if (configValue == null || configValue == 0) { + return null; + } + if (configValue < 0) { + throw new CloudRuntimeException(String.format("Unable to retrieve a valid value for config %s: %s", + configKey, 
configValue)); + } + Calendar cal = Calendar.getInstance(); + Date endDate = new Date(); + cal.setTime(endDate); + cal.add(Calendar.DATE, -1 * configValue); + return cal.getTime(); + } + + protected Date getEndDateFromConfig() { + return calculatePastDateFromConfig(ExpungedResourcesPurgeKeepPastDays.key(), + ExpungedResourcesPurgeKeepPastDays.value()); + } + + protected List getResourceTypesFromConfig() { + String resourceTypesConfig = ExpungedResourcePurgeResources.value(); + if (StringUtils.isBlank(resourceTypesConfig)) { + return null; + } + List resourceTypes = new ArrayList<>(); + for (String type : resourceTypesConfig.split(",")) { + ResourceType resourceType = EnumUtils.getEnum(ResourceType.class, type.trim(), null); + if (resourceType == null) { + throw new CloudRuntimeException(String.format("Invalid resource type: '%s' specified in " + + "the config: %s", type, ExpungedResourcePurgeResources.key())); + } + resourceTypes.add(resourceType); + } + return resourceTypes; + } + + protected long getBatchSizeFromConfig() { + Integer batchSize = ExpungedResourcesPurgeBatchSize.value(); + if (batchSize == null || batchSize <= 0) { + throw new CloudRuntimeException(String.format("Unable to retrieve a valid value for config %s: %s", + ExpungedResourcesPurgeBatchSize.key(), batchSize)); + } + return batchSize.longValue(); + } + + @Override + public long purgeExpungedResources(PurgeExpungedResourcesCmd cmd) { + final String resourceTypeStr = cmd.getResourceType(); + final Date startDate = cmd.getStartDate(); + final Date endDate = cmd.getEndDate(); + Long batchSize = cmd.getBatchSize(); + ResourceType resourceType = getResourceTypeAndValidatePurgeExpungedResourcesCmdParams(resourceTypeStr, + startDate, endDate, batchSize); + Integer globalBatchSize = ExpungedResourcesPurgeBatchSize.value(); + if (batchSize == null && globalBatchSize > 0) { + batchSize = globalBatchSize.longValue(); + } + long expungedCount = purgeExpungedResourceUsingJob(resourceType, batchSize, 
startDate, endDate); + if (expungedCount <= 0) { + logger.debug("No resource expunged during purgeExpungedResources execution"); + } + return expungedCount; + } + + @Override + public void purgeExpungedVmResourcesLaterIfNeeded(VirtualMachine vm) { + if (!isVmOfferingPurgeResourcesEnabled(vm.getServiceOfferingId())) { + return; + } + purgeExpungedResourceLater(vm.getId(), ResourceType.VirtualMachine); + } + + @Override + public boolean start() { + if (Boolean.TRUE.equals(ExpungedResourcePurgeEnabled.value())) { + expungedResourcesCleanupExecutor = new ScheduledThreadPoolExecutor(1, + new NamedThreadFactory("ExpungedResourceCleanupWorker")); + expungedResourcesCleanupExecutor.scheduleWithFixedDelay(new ExpungedResourceCleanupWorker(), + ExpungedResourcesPurgeDelay.value(), ExpungedResourcesPurgeInterval.value(), TimeUnit.SECONDS); + } + purgeExpungedResourcesJobExecutor = Executors.newFixedThreadPool(3, + new NamedThreadFactory("Purge-Expunged-Resources-Job-Executor")); + return true; + } + + @Override + public boolean stop() { + purgeExpungedResourcesJobExecutor.shutdown(); + if (expungedResourcesCleanupExecutor != null) { + expungedResourcesCleanupExecutor.shutdownNow(); + } + return true; + } + + @Override + public List> getCommands() { + final List> cmdList = new ArrayList<>(); + cmdList.add(PurgeExpungedResourcesCmd.class); + return cmdList; + } + + @Override + public String getConfigComponentName() { + return ResourceCleanupService.class.getName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + ExpungedResourcePurgeEnabled, + ExpungedResourcePurgeResources, + ExpungedResourcesPurgeInterval, + ExpungedResourcesPurgeDelay, + ExpungedResourcesPurgeBatchSize, + ExpungedResourcesPurgeStartTime, + ExpungedResourcesPurgeKeepPastDays, + ExpungedResourcePurgeJobDelay + }; + } + + public class ExpungedResourceCleanupWorker extends ManagedContextRunnable { + @Override + protected void runInContext() { + GlobalLock gcLock = 
GlobalLock.getInternLock("Expunged.Resource.Cleanup.Lock"); + try { + if (gcLock.lock(3)) { + try { + runCleanupForLongestRunningManagementServer(); + } finally { + gcLock.unlock(); + } + } + } finally { + gcLock.releaseRef(); + } + } + + protected void runCleanupForLongestRunningManagementServer() { + ManagementServerHostVO msHost = managementServerHostDao.findOneByLongestRuntime(); + if (msHost == null || (msHost.getMsid() != ManagementServerNode.getManagementServerId())) { + logger.debug("Skipping the expunged resource cleanup task on this management server"); + return; + } + reallyRun(); + } + + public void reallyRun() { + try { + Date startDate = getStartDateFromConfig(); + Date endDate = getEndDateFromConfig(); + List resourceTypes = getResourceTypesFromConfig(); + long batchSize = getBatchSizeFromConfig(); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Purging resources: %s as part of cleanup with start date: %s, " + + "end date: %s and batch size: %d", StringUtils.join(resourceTypes), startDate, endDate, batchSize)); + } + purgeEntities(resourceTypes, batchSize, startDate, endDate); + } catch (Exception e) { + logger.warn("Caught exception while running expunged resources cleanup task: ", e); + } + } + } + + protected class PurgeExpungedResourceThread extends ManagedContextRunnable { + ResourceType resourceType; + Long resourceId; + Long batchSize; + Date startDate; + Date endDate; + AsyncCompletionCallback callback; + long taskTimestamp; + + public PurgeExpungedResourceThread(final ResourceType resourceType, final Long batchSize, + final Date startDate, final Date endDate, + AsyncCompletionCallback callback) { + this.resourceType = resourceType; + this.batchSize = batchSize; + this.startDate = startDate; + this.endDate = endDate; + this.callback = callback; + } + + public PurgeExpungedResourceThread(final Long resourceId, final ResourceType resourceType, + AsyncCompletionCallback callback) { + this.resourceType = resourceType; + 
this.resourceId = resourceId; + this.callback = callback; + this.taskTimestamp = System.currentTimeMillis(); + } + + @Override + protected void runInContext() { + logger.trace(String.format("Executing purge for resource type: %s with batch size: %d start: %s, end: %s", + resourceType, batchSize, startDate, endDate)); + reallyRun(); + } + + protected void waitForPurgeSingleResourceDelay(String resourceAsString) throws InterruptedException { + long jobDelayConfig = ExpungedResourcePurgeJobDelay.value(); + if (jobDelayConfig < MINIMUM_EXPUNGED_RESOURCE_PURGE_JOB_DELAY_IN_SECONDS) { + logger.debug(String.format("Value: %d for config: %s is lesser than the minimum value: %d, " + + "using minimum value", + jobDelayConfig, + ExpungedResourcePurgeJobDelay.key(), + MINIMUM_EXPUNGED_RESOURCE_PURGE_JOB_DELAY_IN_SECONDS)); + jobDelayConfig = MINIMUM_EXPUNGED_RESOURCE_PURGE_JOB_DELAY_IN_SECONDS; + } + long delay = (jobDelayConfig * 1000) - + (System.currentTimeMillis() - taskTimestamp); + + if (delay > 0) { + if (logger.isTraceEnabled()) { + logger.trace(String.format("Waiting for %d before purging %s", delay, resourceAsString)); + } + Thread.sleep(delay); + } + } + + protected void purgeSingleResource() { + String resourceAsString = String.format("resource [type: %s, ID: %d]", resourceType, resourceId); + try { + waitForPurgeSingleResourceDelay(resourceAsString); + if (!purgeExpungedResource(resourceId, resourceType)) { + throw new CloudRuntimeException(String.format("Failed to purge %s", resourceAsString)); + } + if (logger.isDebugEnabled()) { + logger.info(String.format("Purged %s", resourceAsString)); + } + callback.complete(new PurgeExpungedResourcesResult(resourceId, resourceType, null)); + } catch (CloudRuntimeException e) { + logger.error(String.format("Caught exception while purging %s: ", resourceAsString), e); + callback.complete(new PurgeExpungedResourcesResult(resourceId, resourceType, e.getMessage())); + } catch (InterruptedException e) { + 
logger.error(String.format("Caught exception while waiting for purging %s: ", resourceAsString), e); + callback.complete(new PurgeExpungedResourcesResult(resourceId, resourceType, e.getMessage())); + } + } + + protected void purgeMultipleResources() { + try { + long purged = purgeEntities(resourceType == null ? null : List.of(resourceType), + batchSize, startDate, endDate); + callback.complete(new PurgeExpungedResourcesResult(resourceType, batchSize, startDate, endDate, purged)); + } catch (CloudRuntimeException e) { + logger.error("Caught exception while expunging resources: ", e); + callback.complete(new PurgeExpungedResourcesResult(resourceType, batchSize, startDate, endDate, e.getMessage())); + } + } + + public void reallyRun() { + if (resourceId != null) { + purgeSingleResource(); + return; + } + purgeMultipleResources(); + } + } + + public static class PurgeExpungedResourcesResult extends CommandResult { + ResourceType resourceType; + Long resourceId; + Long batchSize; + Date startDate; + Date endDate; + Long purgedCount; + + public PurgeExpungedResourcesResult(final ResourceType resourceType, final Long batchSize, + final Date startDate, final Date endDate, final long purgedCount) { + super(); + this.resourceType = resourceType; + this.batchSize = batchSize; + this.startDate = startDate; + this.endDate = endDate; + this.purgedCount = purgedCount; + this.setSuccess(true); + } + + public PurgeExpungedResourcesResult(final ResourceType resourceType, final Long batchSize, + final Date startDate, final Date endDate, final String error) { + super(); + this.resourceType = resourceType; + this.batchSize = batchSize; + this.startDate = startDate; + this.endDate = endDate; + this.setResult(error); + } + + public PurgeExpungedResourcesResult(final Long resourceId, final ResourceType resourceType, + final String error) { + super(); + this.resourceId = resourceId; + this.resourceType = resourceType; + if (error != null) { + this.setResult(error); + } else { + 
this.purgedCount = 1L; + this.setSuccess(true); + } + } + + public ResourceType getResourceType() { + return resourceType; + } + + public Long getResourceId() { + return resourceId; + } + + public Long getBatchSize() { + return batchSize; + } + + public Date getStartDate() { + return startDate; + } + + public Date getEndDate() { + return endDate; + } + + public Long getPurgedCount() { + return purgedCount; + } + } + + public static class PurgeExpungedResourcesContext extends AsyncRpcContext { + final AsyncCallFuture future; + + public PurgeExpungedResourcesContext(AsyncCompletionCallback callback, + AsyncCallFuture future) { + super(callback); + this.future = future; + } + + } +} diff --git a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java index d7c3f1033f2..64d9b3467e3 100644 --- a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java +++ b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java @@ -93,7 +93,9 @@ public class SnapshotHelper { */ public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) { if (!kvmSnapshotOnlyInPrimaryStorage) { - logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId())); + if (snapInfo != null) { + logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId())); + } return; } @@ -102,15 +104,21 @@ public class SnapshotHelper { return; } - logger.debug(String.format("Expunging snapshot [%s] due to it is a temporary backup to create a volume from snapshot. 
It is occurring because the global setting [%s]" - + " has the value [%s].", snapInfo.getId(), SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key(), backupSnapshotAfterTakingSnapshot)); + if (!DataStoreRole.Image.equals(snapInfo.getDataStore().getRole())) { + logger.debug(String.format("Expunge template for Snapshot [%s] is called for primary storage role. Not expunging it, " + + "but we will still expunge the database reference of the snapshot for image storage role if any", snapInfo.getId())); + } else { + logger.debug(String.format("Expunging snapshot [%s] due to it is a temporary backup to create a volume from snapshot. It is occurring because the global setting [%s]" + + " has the value [%s].", snapInfo.getId(), SnapshotInfo.BackupSnapshotAfterTakingSnapshot.key(), backupSnapshotAfterTakingSnapshot)); - try { - snapshotService.deleteSnapshot(snapInfo); - } catch (CloudRuntimeException ex) { - logger.warn(String.format("Unable to delete the temporary snapshot [%s] on secondary storage due to [%s]. We still will expunge the database reference, consider" - + " manually deleting the file [%s].", snapInfo.getId(), ex.getMessage(), snapInfo.getPath()), ex); + try { + snapshotService.deleteSnapshot(snapInfo); + } catch (CloudRuntimeException ex) { + logger.warn(String.format("Unable to delete the temporary snapshot [%s] on secondary storage due to [%s]. 
We still will expunge the database reference, consider" + + " manually deleting the file [%s].", snapInfo.getId(), ex.getMessage(), snapInfo.getPath()), ex); + } } + long storeId = snapInfo.getDataStore().getId(); if (!DataStoreRole.Image.equals(snapInfo.getDataStore().getRole())) { long zoneId = dataStorageManager.getStoreZoneId(storeId, snapInfo.getDataStore().getRole()); diff --git a/server/src/main/java/org/apache/cloudstack/storage/browser/StorageBrowserImpl.java b/server/src/main/java/org/apache/cloudstack/storage/browser/StorageBrowserImpl.java index 8828ac486f5..daa20464f3b 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/browser/StorageBrowserImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/browser/StorageBrowserImpl.java @@ -233,14 +233,20 @@ public class StorageBrowserImpl extends MutualExclusiveIdsManagerBase implements new Date(answer.getLastModified().get(i))); String filePath = paths.get(i); if (pathTemplateMap.get(filePath) != null) { - response.setTemplateId(pathTemplateMap.get(filePath).getUuid()); - response.setFormat(pathTemplateMap.get(filePath).getFormat().toString()); + VMTemplateVO vmTemplateVO = pathTemplateMap.get(filePath); + response.setTemplateId(vmTemplateVO.getUuid()); + response.setFormat(vmTemplateVO.getFormat().toString()); + response.setTemplateName(vmTemplateVO.getName()); } if (pathSnapshotMap.get(filePath) != null) { - response.setSnapshotId(pathSnapshotMap.get(filePath).getUuid()); + SnapshotVO snapshotVO = pathSnapshotMap.get(filePath); + response.setSnapshotId(snapshotVO.getUuid()); + response.setSnapshotName(snapshotVO.getName()); } if (pathVolumeMap.get(filePath) != null) { - response.setVolumeId(pathVolumeMap.get(filePath).getUuid()); + VolumeVO volumeVO = pathVolumeMap.get(filePath); + response.setVolumeId(volumeVO.getUuid()); + response.setVolumeName(volumeVO.getName()); } responses.add(response); } diff --git 
a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java index e6acd180f16..58b41d6a55d 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.storage.object; import com.amazonaws.services.s3.internal.BucketNameUtils; import com.amazonaws.services.s3.model.IllegalBucketNameException; +import com.cloud.agent.api.to.BucketTO; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -136,29 +137,30 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic ObjectStoreVO objectStoreVO = _objectStoreDao.findById(cmd.getObjectStoragePoolId()); ObjectStoreEntity objectStore = (ObjectStoreEntity)_dataStoreMgr.getDataStore(objectStoreVO.getId(), DataStoreRole.Object); BucketVO bucket = _bucketDao.findById(cmd.getEntityId()); + BucketTO bucketTO = new BucketTO(bucket); boolean objectLock = false; boolean bucketCreated = false; if(cmd.isObjectLocking()) { objectLock = true; } try { - objectStore.createBucket(bucket, objectLock); + bucketTO = new BucketTO(objectStore.createBucket(bucket, objectLock)); bucketCreated = true; if (cmd.isVersioning()) { - objectStore.setBucketVersioning(bucket.getName()); + objectStore.setBucketVersioning(bucketTO); } if (cmd.isEncryption()) { - objectStore.setBucketEncryption(bucket.getName()); + objectStore.setBucketEncryption(bucketTO); } if (cmd.getQuota() != null) { - objectStore.setQuota(bucket.getName(), cmd.getQuota()); + objectStore.setQuota(bucketTO, cmd.getQuota()); } if (cmd.getPolicy() != null) { - objectStore.setBucketPolicy(bucket.getName(), cmd.getPolicy()); + objectStore.setBucketPolicy(bucketTO, cmd.getPolicy()); } 
bucket.setState(Bucket.State.Created); @@ -166,7 +168,7 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic } catch (Exception e) { logger.debug("Failed to create bucket with name: "+bucket.getName(), e); if(bucketCreated) { - objectStore.deleteBucket(bucket.getName()); + objectStore.deleteBucket(bucketTO); } _bucketDao.remove(bucket.getId()); throw new CloudRuntimeException("Failed to create bucket with name: "+bucket.getName()+". "+e.getMessage()); @@ -178,13 +180,14 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic @ActionEvent(eventType = EventTypes.EVENT_BUCKET_DELETE, eventDescription = "deleting bucket") public boolean deleteBucket(long bucketId, Account caller) { Bucket bucket = _bucketDao.findById(bucketId); + BucketTO bucketTO = new BucketTO(bucket); if (bucket == null) { throw new InvalidParameterValueException("Unable to find bucket with ID: " + bucketId); } _accountMgr.checkAccess(caller, null, true, bucket); ObjectStoreVO objectStoreVO = _objectStoreDao.findById(bucket.getObjectStoreId()); ObjectStoreEntity objectStore = (ObjectStoreEntity)_dataStoreMgr.getDataStore(objectStoreVO.getId(), DataStoreRole.Object); - if (objectStore.deleteBucket(bucket.getName())) { + if (objectStore.deleteBucket(bucketTO)) { return _bucketDao.remove(bucketId); } return false; @@ -194,6 +197,7 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic @ActionEvent(eventType = EventTypes.EVENT_BUCKET_UPDATE, eventDescription = "updating bucket") public boolean updateBucket(UpdateBucketCmd cmd, Account caller) { BucketVO bucket = _bucketDao.findById(cmd.getId()); + BucketTO bucketTO = new BucketTO(bucket); if (bucket == null) { throw new InvalidParameterValueException("Unable to find bucket with ID: " + cmd.getId()); } @@ -203,29 +207,29 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic try { if (cmd.getEncryption() != null) { if (cmd.getEncryption()) 
{ - objectStore.setBucketEncryption(bucket.getName()); + objectStore.setBucketEncryption(bucketTO); } else { - objectStore.deleteBucketEncryption(bucket.getName()); + objectStore.deleteBucketEncryption(bucketTO); } bucket.setEncryption(cmd.getEncryption()); } if (cmd.getVersioning() != null) { if (cmd.getVersioning()) { - objectStore.setBucketVersioning(bucket.getName()); + objectStore.setBucketVersioning(bucketTO); } else { - objectStore.deleteBucketVersioning(bucket.getName()); + objectStore.deleteBucketVersioning(bucketTO); } bucket.setVersioning(cmd.getVersioning()); } if (cmd.getPolicy() != null) { - objectStore.setBucketPolicy(bucket.getName(), cmd.getPolicy()); + objectStore.setBucketPolicy(bucketTO, cmd.getPolicy()); bucket.setPolicy(cmd.getPolicy()); } if (cmd.getQuota() != null) { - objectStore.setQuota(bucket.getName(), cmd.getQuota()); + objectStore.setQuota(bucketTO, cmd.getQuota()); bucket.setQuota(cmd.getQuota()); } _bucketDao.update(bucket.getId(), bucket); diff --git a/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java new file mode 100644 index 00000000000..072f7d4cd3e --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImpl.java @@ -0,0 +1,720 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.sharedfs; + +import static org.apache.cloudstack.storage.sharedfs.SharedFS.SharedFSCleanupDelay; +import static org.apache.cloudstack.storage.sharedfs.SharedFS.SharedFSCleanupInterval; +import static org.apache.cloudstack.storage.sharedfs.SharedFS.SharedFSFeatureEnabled; +import static org.apache.cloudstack.storage.sharedfs.SharedFS.SharedFSExpungeWorkers; + +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import com.cloud.configuration.ConfigurationManager; +import com.cloud.dc.DataCenter; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.VirtualMachineMigrationException; +import com.cloud.network.Network; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.org.Grouping; +import 
com.cloud.projects.Project; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.VolumeApiService; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; +import com.cloud.utils.component.PluggableService; +import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ChangeSharedFSDiskOfferingCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ChangeSharedFSServiceOfferingCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.CreateSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ExpungeSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ListSharedFSProvidersCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ListSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.DestroySharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.RecoverSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.RestartSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.StartSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.StopSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.UpdateSharedFSCmd; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.ListResponse; +import 
org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.storage.sharedfs.dao.SharedFSDao; +import org.apache.cloudstack.storage.sharedfs.SharedFS.Event; +import org.apache.cloudstack.storage.sharedfs.SharedFS.State; +import org.apache.cloudstack.storage.sharedfs.query.dao.SharedFSJoinDao; +import org.apache.cloudstack.storage.sharedfs.query.vo.SharedFSJoinVO; + +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.NicVO; +import com.cloud.vm.dao.NicDao; + +public class SharedFSServiceImpl extends ManagerBase implements SharedFSService, Configurable, PluggableService { + + @Inject + private AccountManager accountMgr; + + @Inject + private DataCenterDao dataCenterDao; + + @Inject + private ConfigurationManager configMgr; + + @Inject + private VolumeApiService volumeApiService; + + @Inject + private SharedFSDao sharedFSDao; + + @Inject + private SharedFSJoinDao sharedFSJoinDao; + + @Inject + private DiskOfferingDao diskOfferingDao; + + @Inject + ConfigurationDao configDao; + + @Inject + VolumeDao volumeDao; + + @Inject + NetworkDao networkDao; + + @Inject + NicDao nicDao; + + protected List sharedFSProviders; + + private Map sharedFSProviderMap = new HashMap<>(); + + protected final StateMachine2 sharedFSStateMachine; + + ScheduledExecutorService _executor = null; + + public SharedFSServiceImpl() { + this.sharedFSStateMachine = State.getStateMachine(); + } + + @Override + public boolean start() { + sharedFSProviderMap.clear(); + for (final SharedFSProvider provider : sharedFSProviders) { 
+ sharedFSProviderMap.put(provider.getName(), provider); + provider.configure(); + } + _executor.scheduleWithFixedDelay(new SharedFSGarbageCollector(), SharedFSCleanupInterval.value(), SharedFSCleanupInterval.value(), TimeUnit.SECONDS); + return true; + } + + public boolean stop() { + _executor.shutdown(); + return true; + } + + @Override + public List getSharedFSProviders() { + return sharedFSProviders; + } + + @Override + public boolean stateTransitTo(SharedFS sharedFS, Event event) { + try { + return sharedFSStateMachine.transitTo(sharedFS, event, null, sharedFSDao); + } catch (NoTransitionException e) { + String message = String.format("State transit error for Shared FileSystem %s [%s] due to exception: %s.", + sharedFS.getName(), sharedFS.getId(), e.getMessage()); + logger.error(message, e); + throw new CloudRuntimeException(message, e); + } + } + + @Override + public void setSharedFSProviders(List sharedFSProviders) { + this.sharedFSProviders = sharedFSProviders; + } + + @Override + public SharedFSProvider getSharedFSProvider(String sharedFSProviderName) { + if (sharedFSProviderMap.containsKey(sharedFSProviderName)) { + return sharedFSProviderMap.get(sharedFSProviderName); + } + throw new CloudRuntimeException("Invalid Shared FileSystem provider name!"); + } + + public boolean configure(final String name, final Map params) throws ConfigurationException { + int wrks = SharedFSExpungeWorkers.value(); + _executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("SharedFS-Scavenger")); + return true; + } + + public List> getCommands() { + final List> cmdList = new ArrayList<>(); + if (SharedFSFeatureEnabled.value()) { + cmdList.add(ListSharedFSProvidersCmd.class); + cmdList.add(CreateSharedFSCmd.class); + cmdList.add(ListSharedFSCmd.class); + cmdList.add(UpdateSharedFSCmd.class); + cmdList.add(DestroySharedFSCmd.class); + cmdList.add(RestartSharedFSCmd.class); + cmdList.add(StartSharedFSCmd.class); + cmdList.add(StopSharedFSCmd.class); + 
cmdList.add(ChangeSharedFSDiskOfferingCmd.class); + cmdList.add(ChangeSharedFSServiceOfferingCmd.class); + cmdList.add(RecoverSharedFSCmd.class); + cmdList.add(ExpungeSharedFSCmd.class); + } + return cmdList; + } + + private DataCenter validateAndGetZone(Long zoneId) { + DataCenter zone = dataCenterDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException("Unable to find zone by ID: " + zoneId); + } + if (zone.getAllocationState() == Grouping.AllocationState.Disabled) { + throw new PermissionDeniedException(String.format("Cannot perform this operation, zone ID: %s is currently disabled", zone.getUuid())); + } + if (zone.getNetworkType() == DataCenter.NetworkType.Basic || + zone.isSecurityGroupEnabled()) { + throw new PermissionDeniedException("This feature is supported only on Advanced Zone without security groups"); + } + return zone; + } + + private void validateDiskOffering(Long diskOfferingId, Long size, Long minIops, Long maxIops, DataCenter zone) { + Account caller = CallContext.current().getCallingAccount(); + DiskOfferingVO diskOffering = diskOfferingDao.findById(diskOfferingId); + configMgr.checkDiskOfferingAccess(caller, diskOffering, zone); + + if (!diskOffering.isCustomized() && size != null) { + throw new InvalidParameterValueException("Size provided with a non-custom disk offering"); + } + if ((diskOffering.isCustomizedIops() == null || diskOffering.isCustomizedIops() == false) && (minIops != null || maxIops != null)) { + throw new InvalidParameterValueException("Iops provided with a non-custom-iops disk offering"); + } + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_CREATE, eventDescription = "Allocating Shared FileSystem", create = true) + public SharedFS allocSharedFS(CreateSharedFSCmd cmd) { + Account caller = CallContext.current().getCallingAccount(); + + long ownerId = cmd.getEntityOwnerId(); + Account owner = accountMgr.getActiveAccountById(ownerId); + accountMgr.checkAccess(caller, null, 
true, owner); + DataCenter zone = validateAndGetZone(cmd.getZoneId()); + + Long diskOfferingId = cmd.getDiskOfferingId(); + Long size = cmd.getSize(); + Long minIops = cmd.getMinIops(); + Long maxIops = cmd.getMaxIops(); + validateDiskOffering(diskOfferingId, size, minIops, maxIops, zone); + + SharedFSProvider provider = getSharedFSProvider(cmd.getSharedFSProviderName()); + SharedFSLifeCycle lifeCycle = provider.getSharedFSLifeCycle(); + lifeCycle.checkPrerequisites(zone, cmd.getServiceOfferingId()); + + NetworkVO networkVO = networkDao.findById(cmd.getNetworkId()); + if (networkVO == null) { + throw new InvalidParameterValueException("Unable to find a network with Network ID " + cmd.getNetworkId()); + } + if (networkVO.getGuestType() == Network.GuestType.Shared) { + if ((networkVO.getAclType() != ControlledEntity.ACLType.Account) || + (cmd.getDomainId() != null && (networkVO.getDomainId() != cmd.getDomainId())) || + (networkVO.getAccountId() != owner.getAccountId())) { + throw new InvalidParameterValueException("Shared network which is not Account scoped and not belonging to the same account can not be used to create a Shared FileSystem"); + } + } + + SharedFS.FileSystemType fsType; + try { + fsType = SharedFS.FileSystemType.valueOf(cmd.getFsFormat().toUpperCase()); + } catch (IllegalArgumentException ex) { + throw new InvalidParameterValueException("Invalid File system format specified. 
Supported formats are EXT4 and XFS"); + } + + if (sharedFSDao.findSharedFSByNameAccountDomain(cmd.getName(), owner.getAccountId(), cmd.getDomainId()) != null) { + throw new InvalidParameterValueException("There already exists a Shared FileSystem with this name for the given account and domain."); + } + + SharedFSVO sharedFS = new SharedFSVO(cmd.getName(), cmd.getDescription(), owner.getDomainId(), + ownerId, cmd.getZoneId(), cmd.getSharedFSProviderName(), SharedFS.Protocol.NFS, + fsType, cmd.getServiceOfferingId()); + + return sharedFSDao.persist(sharedFS); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_CREATE, eventDescription = "Deploying Shared FileSystem", async = true) + public SharedFS deploySharedFS(CreateSharedFSCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, OperationTimedoutException { + SharedFSVO sharedFS = sharedFSDao.findById(cmd.getEntityId()); + Long diskOfferingId = cmd.getDiskOfferingId(); + Long size = cmd.getSize(); + Long minIops = cmd.getMinIops(); + Long maxIops = cmd.getMaxIops(); + SharedFSProvider provider = getSharedFSProvider(cmd.getSharedFSProviderName()); + SharedFSLifeCycle lifeCycle = provider.getSharedFSLifeCycle(); + Pair result = null; + try { + result = lifeCycle.deploySharedFS(sharedFS, cmd.getNetworkId(), diskOfferingId, size, minIops, maxIops); + } catch (Exception ex) { + stateTransitTo(sharedFS, Event.OperationFailed); + throw ex; + } + sharedFS.setVolumeId(result.first()); + sharedFS.setVmId(result.second()); + sharedFSDao.update(sharedFS.getId(), sharedFS); + stateTransitTo(sharedFS, Event.OperationSucceeded); + return sharedFS; + } + + private SharedFS startSharedFS(SharedFS sharedFS) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException { + SharedFSProvider provider = getSharedFSProvider(sharedFS.getFsProviderName()); + SharedFSLifeCycle lifeCycle = provider.getSharedFSLifeCycle(); + + try { + 
stateTransitTo(sharedFS, Event.StartRequested); + lifeCycle.startSharedFS(sharedFS); + } catch (Exception ex) { + stateTransitTo(sharedFS, Event.OperationFailed); + throw ex; + } + stateTransitTo(sharedFS, Event.OperationSucceeded); + sharedFS = sharedFSDao.findById(sharedFS.getId()); + return sharedFS; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_START, eventDescription = "Starting Shared FileSystem") + public SharedFS startSharedFS(Long sharedFSId) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { + SharedFSVO sharedFS = sharedFSDao.findById(sharedFSId); + + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + Set validStates = new HashSet<>(List.of(State.Stopped)); + if (!validStates.contains(sharedFS.getState())) { + throw new InvalidParameterValueException("Shared FileSystem can be started only if it is in the " + validStates.toString() + " state"); + } + return startSharedFS(sharedFS); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_STOP, eventDescription = "Stopping Shared FileSystem") + public SharedFS stopSharedFS(Long sharedFSId, Boolean forced) { + SharedFSVO sharedFS = sharedFSDao.findById(sharedFSId); + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + Set validStates = new HashSet<>(List.of(State.Ready)); + if (!validStates.contains(sharedFS.getState())) { + throw new InvalidParameterValueException("Shared FileSystem can be stopped only if it is in the " + State.Ready + " state"); + } + + SharedFSProvider provider = getSharedFSProvider(sharedFS.getFsProviderName()); + SharedFSLifeCycle lifeCycle = provider.getSharedFSLifeCycle(); + try { + stateTransitTo(sharedFS, Event.StopRequested); + lifeCycle.stopSharedFS(sharedFS, forced); + } catch (Exception e) { + stateTransitTo(sharedFS, 
Event.OperationFailed); + throw e; + } + stateTransitTo(sharedFS, Event.OperationSucceeded); + return sharedFS; + } + + private SharedFSVO reDeploySharedFS(SharedFSVO sharedFS) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { + SharedFSProvider provider = getSharedFSProvider(sharedFS.getFsProviderName()); + SharedFSLifeCycle lifeCycle = provider.getSharedFSLifeCycle(); + boolean result = lifeCycle.reDeploySharedFS(sharedFS); + return (result ? sharedFS : null); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_RESTART, eventDescription = "Restarting Shared FileSystem", async = true) + public SharedFS restartSharedFS(Long sharedFSId, boolean cleanup) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { + SharedFSVO sharedFS = sharedFSDao.findById(sharedFSId); + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + + Set validStates = new HashSet<>(List.of(State.Ready, State.Stopped)); + if (!validStates.contains(sharedFS.getState())) { + throw new InvalidParameterValueException("Restart Shared FileSystem can be done only if the shared filesystem is in " + validStates.toString() + " states"); + } + + if (!cleanup) { + if (!sharedFS.getState().equals(State.Stopped)) { + stopSharedFS(sharedFS.getId(), false); + } + return startSharedFS(sharedFS.getId()); + } else { + return reDeploySharedFS(sharedFS); + } + } + + private Pair, Integer> searchForSharedFSIdsAndCount(ListSharedFSCmd cmd) { + Account caller = CallContext.current().getCallingAccount(); + List permittedAccounts = new ArrayList<>(); + + Long id = cmd.getId(); + String name = cmd.getName(); + Long networkId = cmd.getNetworkId(); + Long diskOfferingId = cmd.getDiskOfferingId(); + Long serviceOfferingId = cmd.getServiceOfferingId(); + String keyword = cmd.getKeyword(); + Long 
startIndex = cmd.getStartIndex(); + Long pageSize = cmd.getPageSizeVal(); + Long zoneId = cmd.getZoneId(); + String accountName = cmd.getAccountName(); + Long domainId = cmd.getDomainId(); + Long projectId = cmd.getProjectId(); + + Ternary domainIdRecursiveListProject = new Ternary<>(domainId, cmd.isRecursive(), null); + accountMgr.buildACLSearchParameters(caller, id, accountName, projectId, permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); + domainId = domainIdRecursiveListProject.first(); + Boolean isRecursive = domainIdRecursiveListProject.second(); + Project.ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third(); + Filter searchFilter = new Filter(SharedFSVO.class, "created", false, startIndex, pageSize); + + SearchBuilder sharedFSSearchBuilder = sharedFSDao.createSearchBuilder(); + sharedFSSearchBuilder.select(null, SearchCriteria.Func.DISTINCT, sharedFSSearchBuilder.entity().getId()); // select distinct + accountMgr.buildACLSearchBuilder(sharedFSSearchBuilder, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); + + sharedFSSearchBuilder.and("id", sharedFSSearchBuilder.entity().getId(), SearchCriteria.Op.EQ); + sharedFSSearchBuilder.and("name", sharedFSSearchBuilder.entity().getName(), SearchCriteria.Op.EQ); + sharedFSSearchBuilder.and("dataCenterId", sharedFSSearchBuilder.entity().getDataCenterId(), SearchCriteria.Op.EQ); + + if (keyword != null) { + sharedFSSearchBuilder.and("keywordName", sharedFSSearchBuilder.entity().getName(), SearchCriteria.Op.LIKE); + } + + sharedFSSearchBuilder.and("serviceOfferingId", sharedFSSearchBuilder.entity().getServiceOfferingId(), SearchCriteria.Op.EQ); + + if (diskOfferingId != null) { + SearchBuilder volSearch = volumeDao.createSearchBuilder(); + volSearch.and("diskOfferingId", volSearch.entity().getDiskOfferingId(), SearchCriteria.Op.EQ); + sharedFSSearchBuilder.join("volSearch", volSearch, volSearch.entity().getId(), 
sharedFSSearchBuilder.entity().getVolumeId(), JoinBuilder.JoinType.INNER); + } + + if (networkId != null) { + SearchBuilder nicSearch = nicDao.createSearchBuilder(); + nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); + sharedFSSearchBuilder.join("nicSearch", nicSearch, nicSearch.entity().getInstanceId(), sharedFSSearchBuilder.entity().getVmId(), JoinBuilder.JoinType.INNER); + } + + SearchCriteria sc = sharedFSSearchBuilder.create(); + accountMgr.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); + + if (keyword != null) { + sc.setParameters("keywordName", "%" + keyword + "%"); + } + + if (name != null) { + sc.setParameters("name", name); + } + + if (id != null) { + sc.setParameters("id", id); + } + + if (zoneId != null) { + sc.setParameters("dataCenterId", zoneId); + } + + if (serviceOfferingId != null) { + sc.setParameters("serviceOfferingId", serviceOfferingId); + } + + if (diskOfferingId != null) { + sc.setJoinParameters("volSearch", "diskOfferingId", diskOfferingId); + } + + if (networkId != null) { + sc.setJoinParameters("nicSearch", "networkId", networkId); + } + + Pair, Integer> result = sharedFSDao.searchAndCount(sc, searchFilter); + List idsArray = result.first().stream().map(SharedFSVO::getId).collect(Collectors.toList()); + return new Pair, Integer>(idsArray, result.second()); + } + + private Pair, Integer> searchForSharedFSInternal(ListSharedFSCmd cmd) { + Pair, Integer> sharedFSIds = searchForSharedFSIdsAndCount(cmd); + if (sharedFSIds.second() == 0) { + return new Pair, Integer>(null, 0); + } + + List sharedFSs = sharedFSJoinDao.searchByIds(sharedFSIds.first().toArray(new Long[0])); + return new Pair, Integer>(sharedFSs, sharedFSIds.second()); + } + + @Override + public ListResponse searchForSharedFS(ResponseObject.ResponseView respView, ListSharedFSCmd cmd) { + Pair, Integer> result = searchForSharedFSInternal(cmd); + ListResponse response = new ListResponse<>(); + + 
if (cmd.getRetrieveOnlyResourceCount()) { + response.setResponses(new ArrayList<>(), result.second()); + return response; + } + + Account caller = CallContext.current().getCallingAccount(); + if (accountMgr.isRootAdmin(caller.getId())) { + respView = ResponseObject.ResponseView.Full; + } + + List sharedFSRespons = null; + if (result.second() > 0) { + sharedFSRespons = sharedFSJoinDao.createSharedFSResponses(respView, result.first().toArray(new SharedFSJoinVO[result.first().size()])); + } + + response.setResponses(sharedFSRespons, result.second()); + return response; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_UPDATE, eventDescription = "Updating Shared FileSystem") + public SharedFS updateSharedFS(UpdateSharedFSCmd cmd) { + Long id = cmd.getId(); + String name = cmd.getName(); + String description = cmd.getDescription(); + + SharedFSVO sharedFS = sharedFSDao.findById(id); + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + + if (name != null) { + sharedFS.setName(name); + } + if (description != null) { + sharedFS.setDescription(description); + } + + sharedFSDao.update(sharedFS.getId(), sharedFS); + return sharedFS; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_CHANGE_DISK_OFFERING, eventDescription = "Change Shared FileSystem disk offering") + public SharedFS changeSharedFSDiskOffering(ChangeSharedFSDiskOfferingCmd cmd) throws ResourceAllocationException { + SharedFSVO sharedFS = sharedFSDao.findById(cmd.getId()); + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + Set validStates = new HashSet<>(List.of(State.Ready, State.Stopped)); + + if (!validStates.contains(sharedFS.getState())) { + throw new InvalidParameterValueException("Disk offering of the Shared FileSystem can be changed only if it is in " + validStates.toString() + " states"); + } + + Long diskOfferingId = 
cmd.getDiskOfferingId(); + Long newSize = cmd.getSize(); + Long newMinIops = cmd.getMinIops(); + Long newMaxIops = cmd.getMaxIops(); + DataCenter zone = validateAndGetZone(sharedFS.getDataCenterId()); + validateDiskOffering(diskOfferingId, newSize, newMinIops, newMaxIops, zone); + volumeApiService.changeDiskOfferingForVolumeInternal(sharedFS.getVolumeId(), diskOfferingId, newSize, newMinIops, newMaxIops, true, false); + return sharedFS; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_CHANGE_SERVICE_OFFERING, eventDescription = "Change Shared FileSystem service offering") + public SharedFS changeSharedFSServiceOffering(ChangeSharedFSServiceOfferingCmd cmd) throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ManagementServerException, VirtualMachineMigrationException { + SharedFSVO sharedFS = sharedFSDao.findById(cmd.getId()); + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + Set validStates = new HashSet<>(List.of(State.Stopped)); + if (!validStates.contains(sharedFS.getState())) { + throw new InvalidParameterValueException("Service offering of the Shared FileSystem can be changed only if it is in " + validStates.toString() + " state"); + } + + SharedFSProvider provider = getSharedFSProvider(sharedFS.getFsProviderName()); + SharedFSLifeCycle lifeCycle = provider.getSharedFSLifeCycle(); + DataCenter zone = validateAndGetZone(sharedFS.getDataCenterId()); + lifeCycle.checkPrerequisites(zone, cmd.getServiceOfferingId()); + + sharedFS = sharedFSDao.findById(cmd.getId()); + + if (lifeCycle.changeSharedFSServiceOffering(sharedFS, cmd.getServiceOfferingId())) { + sharedFS.setServiceOfferingId(cmd.getServiceOfferingId()); + sharedFSDao.update(sharedFS.getId(), sharedFS); + return sharedFS; + } else { + return null; + } + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_DESTROY, eventDescription = "Destroy Shared 
FileSystem") + public Boolean destroySharedFS(DestroySharedFSCmd cmd) { + Long sharedFSId = cmd.getId(); + Boolean expunge = cmd.isExpunge(); + SharedFSVO sharedFS = sharedFSDao.findById(sharedFSId); + + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + + if (sharedFS.getState().equals(State.Ready) && cmd.isForced()) { + stopSharedFS(sharedFS.getId(), false); + } + + sharedFS = sharedFSDao.findById(sharedFSId); + Set validStates = new HashSet<>(List.of(State.Stopped, State.Error)); + if (!validStates.contains(sharedFS.getState())) { + throw new InvalidParameterValueException("Shared FileSystem can be destroyed only if it is in the " + validStates.toString() + " states"); + } + + stateTransitTo(sharedFS, Event.DestroyRequested); + if (expunge || sharedFS.getState().equals(State.Error)) { + deleteSharedFS(sharedFSId); + } + return true; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_RECOVER, eventDescription = "Recover Shared FileSystem") + public SharedFS recoverSharedFS(Long sharedFSId) { + SharedFSVO sharedFS = sharedFSDao.findById(sharedFSId); + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + if (!State.Destroyed.equals(sharedFS.getState())) { + throw new InvalidParameterValueException("The Shared FileSystem should be in the Destroyed state to be recovered"); + } + stateTransitTo(sharedFS, Event.RecoveryRequested); + sharedFS = sharedFSDao.findById(sharedFSId); + return sharedFS; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_SHAREDFS_EXPUNGE, eventDescription = "Expunge Shared FileSystem") + public void deleteSharedFS(Long sharedFSId) { + SharedFSVO sharedFS = sharedFSDao.findById(sharedFSId); + Account caller = CallContext.current().getCallingAccount(); + accountMgr.checkAccess(caller, null, false, sharedFS); + + Set validStates = new HashSet<>(List.of(State.Destroyed, State.Expunging, 
State.Error)); + if (!validStates.contains(sharedFS.getState())) { + throw new InvalidParameterValueException("Shared FileSystem can be expunged only if it is in the " + validStates.toString() + " states"); + } + SharedFSProvider provider = getSharedFSProvider(sharedFS.getFsProviderName()); + SharedFSLifeCycle lifeCycle = provider.getSharedFSLifeCycle(); + stateTransitTo(sharedFS, Event.ExpungeOperation); + lifeCycle.deleteSharedFS(sharedFS); + stateTransitTo(sharedFS, Event.OperationSucceeded); + sharedFSDao.remove(sharedFS.getId()); + } + + @Override + public String getConfigComponentName() { + return SharedFSService.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + SharedFSCleanupInterval, + SharedFSCleanupDelay, + SharedFSFeatureEnabled, + SharedFSExpungeWorkers + }; + } + protected class SharedFSGarbageCollector extends ManagedContextRunnable { + + public SharedFSGarbageCollector() { + } + + @Override + protected void runInContext() { + try { + logger.trace("Shared FileSystem Garbage Collection Thread is running."); + + cleanupSharedFS(true); + + } catch (Exception e) { + logger.error("Caught the following Exception", e); + } + } + } + + public void cleanupSharedFS(boolean recurring) { + GlobalLock scanLock = GlobalLock.getInternLock("sharedfsservice.cleanup"); + + try { + if (scanLock.lock(30)) { + try { + + List sharedFSs = sharedFSDao.listSharedFSToBeDestroyed(new Date(System.currentTimeMillis() - ((long)SharedFSCleanupDelay.value() << 10))); + for (SharedFSVO sharedFS : sharedFSs) { + try { + stateTransitTo(sharedFS, Event.ExpungeOperation); + deleteSharedFS(sharedFS.getId()); + } catch (Exception e) { + stateTransitTo(sharedFS, Event.OperationFailed); + logger.error(String.format("Unable to expunge Shared FileSystem [%s] due to: [%s].", sharedFS.getUuid(), e.getMessage())); + } + } + } finally { + scanLock.unlock(); + } + } + } finally { + scanLock.releaseRef(); + } + } +} diff --git 
a/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDao.java b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDao.java new file mode 100644 index 00000000000..5e0be5dd2e6 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDao.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.sharedfs.query.dao; + +import java.util.List; + +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.query.vo.SharedFSJoinVO; + +import com.cloud.utils.db.GenericDao; + +public interface SharedFSJoinDao extends GenericDao { + + SharedFSJoinVO newSharedFSView(SharedFS sharedFS); + + SharedFSResponse newSharedFSResponse(ResponseObject.ResponseView view, SharedFSJoinVO sharedFSView); + + List createSharedFSResponses(ResponseObject.ResponseView view, SharedFSJoinVO... 
sharedFSs); + + List searchByIds(Long...sharedFSIds); +} diff --git a/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDaoImpl.java b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDaoImpl.java new file mode 100644 index 00000000000..a7723ec1158 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDaoImpl.java @@ -0,0 +1,187 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.sharedfs.query.dao; + +import java.text.DecimalFormat; +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.api.response.NicResponse; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.query.vo.SharedFSJoinVO; + +import com.cloud.api.ApiDBUtils; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeStats; +import com.cloud.user.VmDiskStatisticsVO; +import com.cloud.user.dao.VmDiskStatisticsDao; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.NicVO; +import com.cloud.vm.dao.NicDao; + +public class SharedFSJoinDaoImpl extends GenericDaoBase implements SharedFSJoinDao { + + @Inject + NicDao nicDao; + + @Inject + NetworkDao networkDao; + + @Inject + private VmDiskStatisticsDao vmDiskStatsDao; + + private final SearchBuilder fsSearch; + private final SearchBuilder fsIdInSearch; + + protected SharedFSJoinDaoImpl() { + fsSearch = createSearchBuilder(); + fsSearch.and("id", fsSearch.entity().getId(), SearchCriteria.Op.EQ); + fsSearch.done(); + + fsIdInSearch = createSearchBuilder(); + fsIdInSearch.and("idIN", fsIdInSearch.entity().getId(), SearchCriteria.Op.IN); + fsIdInSearch.done(); + } + + @Override + public SharedFSJoinVO newSharedFSView(SharedFS sharedFS) { + SearchCriteria sc = fsSearch.create(); + sc.setParameters("id", sharedFS.getId()); + List sharedFSs = searchIncludingRemoved(sc, null, null, false); + assert sharedFSs != null && sharedFSs.size() == 1 : "No shared filesystem found for id " + sharedFS.getId(); + return sharedFSs.get(0); + } + + @Override + public SharedFSResponse 
newSharedFSResponse(ResponseObject.ResponseView view, SharedFSJoinVO sharedFS) { + SharedFSResponse response = new SharedFSResponse(); + response.setId(sharedFS.getUuid()); + response.setName(sharedFS.getName()); + response.setDescription(sharedFS.getDescription()); + response.setState(sharedFS.getState().toString()); + response.setProvider(sharedFS.getProvider()); + response.setFilesystem(sharedFS.getFsType().toString()); + response.setPath(SharedFS.getSharedFSPath()); + response.setObjectName(SharedFS.class.getSimpleName().toLowerCase()); + response.setZoneId(sharedFS.getZoneUuid()); + response.setZoneName(sharedFS.getZoneName()); + + response.setVirtualMachineId(sharedFS.getInstanceUuid()); + if (sharedFS.getInstanceState() != null) { + response.setVirtualMachineState(sharedFS.getInstanceState().toString()); + } + response.setVolumeId(sharedFS.getVolumeUuid()); + response.setVolumeName(sharedFS.getVolumeName()); + + response.setStoragePoolId(sharedFS.getPoolUuid()); + response.setStoragePoolName(sharedFS.getPoolName()); + + final List nics = nicDao.listByVmId(sharedFS.getInstanceId()); + if (nics.size() > 0) { + for (NicVO nicVO : nics) { + final NetworkVO network = networkDao.findById(nicVO.getNetworkId()); + NicResponse nicResponse = new NicResponse(); + nicResponse.setId(nicVO.getUuid()); + nicResponse.setNetworkid(network.getUuid()); + nicResponse.setIpaddress(nicVO.getIPv4Address()); + nicResponse.setNetworkName(network.getName()); + nicResponse.setObjectName("nic"); + response.addNic(nicResponse); + } + } + + response.setAccountName(sharedFS.getAccountName()); + + response.setDomainId(sharedFS.getDomainUuid()); + response.setDomainName(sharedFS.getDomainName()); + response.setDomainName(sharedFS.getDomainPath()); + + response.setProjectId(sharedFS.getProjectUuid()); + response.setProjectName(sharedFS.getProjectName()); + + response.setDiskOfferingId(sharedFS.getDiskOfferingUuid()); + response.setDiskOfferingName(sharedFS.getDiskOfferingName()); + 
response.setDiskOfferingDisplayText(sharedFS.getDiskOfferingDisplayText()); + response.setIsCustomDiskOffering(sharedFS.isDiskOfferingCustom()); + if (sharedFS.isDiskOfferingCustom() == true) { + response.setSize(sharedFS.getSize()); + } else { + response.setSize(sharedFS.getDiskOfferingSize()); + } + response.setSizeGB(sharedFS.getSize()); + + response.setServiceOfferingId(sharedFS.getServiceOfferingUuid()); + response.setServiceOfferingName(sharedFS.getServiceOfferingName()); + + if (sharedFS.getProvisioningType() != null) { + response.setProvisioningType(sharedFS.getProvisioningType().toString()); + } + + VmDiskStatisticsVO diskStats = vmDiskStatsDao.findBy(sharedFS.getAccountId(), sharedFS.getZoneId(), sharedFS.getInstanceId(), sharedFS.getVolumeId()); + if (diskStats != null) { + response.setDiskIORead(diskStats.getCurrentIORead()); + response.setDiskIOWrite(diskStats.getCurrentIOWrite()); + response.setDiskKbsRead((long) (diskStats.getCurrentBytesRead() / 1024.0)); + response.setDiskKbsWrite((long) (diskStats.getCurrentBytesWrite() / 1024.0)); + } + + VolumeStats vs = null; + if (sharedFS.getVolumeFormat() == Storage.ImageFormat.VHD || sharedFS.getVolumeFormat() == Storage.ImageFormat.QCOW2 || sharedFS.getVolumeFormat() == Storage.ImageFormat.RAW) { + if (sharedFS.getVolumePath() != null) { + vs = ApiDBUtils.getVolumeStatistics(sharedFS.getVolumePath()); + } + } else if (sharedFS.getVolumeFormat() == Storage.ImageFormat.OVA) { + if (sharedFS.getVolumeChainInfo() != null) { + vs = ApiDBUtils.getVolumeStatistics(sharedFS.getVolumeChainInfo()); + } + } + if (vs != null) { + response.setVirtualSize(vs.getVirtualSize()); + response.setPhysicalSize(vs.getPhysicalSize()); + double util = (double) vs.getPhysicalSize() / vs.getVirtualSize(); + DecimalFormat df = new DecimalFormat("0.0%"); + response.setUtilization(df.format(util)); + } + + return response; + } + + public List createSharedFSResponses(ResponseObject.ResponseView view, SharedFSJoinVO... 
sharedFSs) { + List sharedFSRespons = new ArrayList<>(); + + for (SharedFSJoinVO sharedFS : sharedFSs) { + sharedFSRespons.add(newSharedFSResponse(view, sharedFS)); + } + return sharedFSRespons; + } + + @Override + public List searchByIds(Long... sharedFSIds) { + SearchCriteria sc = fsIdInSearch.create(); + sc.setParameters("idIN", sharedFSIds); + return search(sc, null, null, false); + } +} diff --git a/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/vo/SharedFSJoinVO.java b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/vo/SharedFSJoinVO.java new file mode 100644 index 00000000000..681cc042c95 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/storage/sharedfs/query/vo/SharedFSJoinVO.java @@ -0,0 +1,320 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.sharedfs.query.vo; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFS.State; + +import com.cloud.api.query.vo.BaseViewVO; +import com.cloud.storage.Storage; +import com.cloud.vm.VirtualMachine; + +@Entity +@Table(name = "shared_filesystem_view") +public class SharedFSJoinVO extends BaseViewVO implements InternalIdentity, Identity { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "name") + private String name; + + @Column(name = "description") + private String description; + + @Column(name = "state") + @Enumerated(value = EnumType.STRING) + private State state; + + @Column(name = "provider") + private String provider; + + @Column(name = "fs_type") + @Enumerated(EnumType.STRING) + SharedFS.FileSystemType fsType; + + @Column(name = "size") + private Long size; + + @Column(name = "zone_id") + private long zoneId; + + @Column(name = "zone_uuid") + private String zoneUuid; + + @Column(name = "zone_name") + private String zoneName; + + @Column(name = "account_id") + private long accountId; + + @Column(name = "instance_id") + private long instanceId; + + @Column(name = "instance_uuid") + private String instanceUuid; + + @Column(name = "instance_name") + private String instanceName; + + @Column(name = "instance_state") + @Enumerated(value = EnumType.STRING) + private VirtualMachine.State instanceState; + + @Column(name = "volume_id") + private long volumeId; + + 
@Column(name = "volume_uuid") + private String volumeUuid; + + @Column(name = "volume_name") + private String volumeName; + + @Column(name = "provisioning_type") + @Enumerated(EnumType.STRING) + Storage.ProvisioningType provisioningType; + + @Column(name = "volume_format") + @Enumerated(EnumType.STRING) + private Storage.ImageFormat volumeFormat; + + @Column(name = "volume_path") + private String volumePath; + + @Column(name = "volume_chain_info") + private String volumeChainInfo; + + @Column(name = "pool_uuid") + private String poolUuid; + + @Column(name = "pool_name") + private String poolName; + + @Column(name = "account_name") + private String accountName; + + @Column(name = "project_uuid") + private String projectUuid; + + @Column(name = "project_name") + private String projectName; + + @Column(name = "domain_uuid") + private String domainUuid; + + @Column(name = "domain_name") + private String domainName; + + @Column(name = "domain_path") + private String domainPath; + + @Column(name = "service_offering_uuid") + private String serviceOfferingUuid; + + @Column(name = "service_offering_name") + private String serviceOfferingName; + + @Column(name = "disk_offering_uuid") + private String diskOfferingUuid; + + @Column(name = "disk_offering_name") + private String diskOfferingName; + + @Column(name = "disk_offering_display_text") + private String diskOfferingDisplayText; + + @Column(name = "disk_offering_size") + private long diskOfferingSize; + + @Column(name = "disk_offering_custom") + private boolean diskOfferingCustom; + + public SharedFSJoinVO() { + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public State getState() { + return state; + } + + public String getProvider() { + return provider; + } + + public SharedFS.FileSystemType getFsType() { + return fsType; + } + + public 
Long getSize() { + return size; + } + + public long getZoneId() { + return zoneId; + } + + public long getAccountId() { + return accountId; + } + + public String getZoneUuid() { + return zoneUuid; + } + + public String getZoneName() { + return zoneName; + } + + public long getInstanceId() { + return instanceId; + } + + public String getInstanceUuid() { + return instanceUuid; + } + + public String getInstanceName() { + return instanceName; + } + + public VirtualMachine.State getInstanceState() { + return instanceState; + } + + public long getVolumeId() { + return volumeId; + } + + public String getVolumeUuid() { + return volumeUuid; + } + + public String getVolumeName() { + return volumeName; + } + + public Storage.ProvisioningType getProvisioningType() { + return provisioningType; + } + + public Storage.ImageFormat getVolumeFormat() { + return volumeFormat; + } + + public String getVolumePath() { + return volumePath; + } + + public String getVolumeChainInfo() { + return volumeChainInfo; + } + + public String getPoolUuid() { + return poolUuid; + } + + public String getPoolName() { + return poolName; + } + + public String getAccountName() { + return accountName; + } + + public String getProjectUuid() { + return projectUuid; + } + + public String getProjectName() { + return projectName; + } + + public String getDomainUuid() { + return domainUuid; + } + + public String getDomainName() { + return domainName; + } + + public String getDomainPath() { + return domainPath; + } + + public String getServiceOfferingUuid() { + return serviceOfferingUuid; + } + + public String getServiceOfferingName() { + return serviceOfferingName; + } + + public String getDiskOfferingUuid() { + return diskOfferingUuid; + } + + public String getDiskOfferingName() { + return diskOfferingName; + } + + public String getDiskOfferingDisplayText() { + return diskOfferingDisplayText; + } + + public long getDiskOfferingSize() { + return diskOfferingSize; + } + + public boolean isDiskOfferingCustom() { + 
return diskOfferingCustom; + } +} diff --git a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java index 9e4a5903173..6a34ca2d0e5 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java @@ -71,11 +71,13 @@ import org.apache.cloudstack.api.command.admin.template.ListVnfTemplatesCmdByAdm import org.apache.cloudstack.api.command.admin.template.RegisterVnfTemplateCmdByAdmin; import org.apache.cloudstack.api.command.admin.template.UpdateVnfTemplateCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.DeployVnfApplianceCmdByAdmin; +import org.apache.cloudstack.api.command.admin.vm.ListVnfAppliancesCmdByAdmin; import org.apache.cloudstack.api.command.user.template.DeleteVnfTemplateCmd; import org.apache.cloudstack.api.command.user.template.ListVnfTemplatesCmd; import org.apache.cloudstack.api.command.user.template.RegisterVnfTemplateCmd; import org.apache.cloudstack.api.command.user.template.UpdateVnfTemplateCmd; import org.apache.cloudstack.api.command.user.vm.DeployVnfApplianceCmd; +import org.apache.cloudstack.api.command.user.vm.ListVnfAppliancesCmd; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.commons.collections.CollectionUtils; @@ -130,6 +132,8 @@ public class VnfTemplateManagerImpl extends ManagerBase implements VnfTemplateMa cmdList.add(DeleteVnfTemplateCmd.class); cmdList.add(DeployVnfApplianceCmd.class); cmdList.add(DeployVnfApplianceCmdByAdmin.class); + cmdList.add(ListVnfAppliancesCmd.class); + cmdList.add(ListVnfAppliancesCmdByAdmin.class); return cmdList; } @@ -287,7 +291,7 @@ public class VnfTemplateManagerImpl extends ManagerBase implements VnfTemplateMa @Override public SecurityGroup 
createSecurityGroupForVnfAppliance(DataCenter zone, VirtualMachineTemplate template, Account owner, DeployVnfApplianceCmd cmd) { - if (zone == null || !zone.isSecurityGroupEnabled()) { + if (zone == null || !(zone.isSecurityGroupEnabled() || networkModel.isSecurityGroupSupportedForZone(zone.getId()))) { return null; } if (!cmd.getVnfConfigureManagement()) { diff --git a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManager.java b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManager.java new file mode 100644 index 00000000000..a42faf2835a --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManager.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.user; + +import com.cloud.user.UserAccount; +import org.apache.cloudstack.framework.config.ConfigKey; + +public interface UserPasswordResetManager { + ConfigKey UserPasswordResetEnabled = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, + Boolean.class, + "user.password.reset.enabled", "false", + "Setting this to true allows the ACS user to request an email to reset their password", + false, + ConfigKey.Scope.Global); + + ConfigKey UserPasswordResetTtl = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Long.class, + "user.password.reset.ttl", "30", + "TTL in minutes for the token generated to reset the ACS user's password", true, + ConfigKey.Scope.Global); + + ConfigKey UserPasswordResetEmailSender = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, + String.class, "user.password.reset.email.sender", null, + "Sender for emails sent to the user to reset ACS user's password ", true, + ConfigKey.Scope.Global); + + ConfigKey UserPasswordResetSMTPHost = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, + String.class, "user.password.reset.smtp.host", null, + "Host for SMTP server for sending emails for resetting password for ACS users", + false, + ConfigKey.Scope.Global); + + ConfigKey UserPasswordResetSMTPPort = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, + Integer.class, "user.password.reset.smtp.port", "25", + "Port for SMTP server for sending emails for resetting password for ACS users", + false, + ConfigKey.Scope.Global); + + ConfigKey UserPasswordResetSMTPUseAuth = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, + Boolean.class, "user.password.reset.smtp.useAuth", "false", + "Use auth in the SMTP server for sending emails for resetting password for ACS users", + false, ConfigKey.Scope.Global); + + ConfigKey UserPasswordResetSMTPUsername = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, + String.class, "user.password.reset.smtp.username", null, + "Username for SMTP server for sending emails for resetting password for ACS users", + false, 
ConfigKey.Scope.Global); + + ConfigKey UserPasswordResetSMTPPassword = new ConfigKey<>("Secure", String.class, + "user.password.reset.smtp.password", null, + "Password for SMTP server for sending emails for resetting password for ACS users", + false, ConfigKey.Scope.Global); + + void setResetTokenAndSend(UserAccount userAccount); + + boolean validateAndResetPassword(UserAccount user, String token, String password); +} diff --git a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java new file mode 100644 index 00000000000..f35f69fb8bf --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java @@ -0,0 +1,312 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.user; + +import com.cloud.user.AccountManager; +import com.cloud.user.UserAccount; +import com.cloud.user.UserVO; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.StringUtils; +import com.cloud.utils.component.ManagerBase; +import com.github.mustachejava.DefaultMustacheFactory; +import com.github.mustachejava.Mustache; +import com.github.mustachejava.MustacheFactory; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.resourcedetail.UserDetailVO; +import org.apache.cloudstack.resourcedetail.dao.UserDetailsDao; +import org.apache.cloudstack.utils.mailing.MailAddress; +import org.apache.cloudstack.utils.mailing.SMTPMailProperties; +import org.apache.cloudstack.utils.mailing.SMTPMailSender; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.io.IOException; +import java.io.StringReader; +import java.io.StringWriter; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import static org.apache.cloudstack.config.ApiServiceConfiguration.ManagementServerAddresses; +import static org.apache.cloudstack.resourcedetail.UserDetailVO.PasswordResetToken; +import static org.apache.cloudstack.resourcedetail.UserDetailVO.PasswordResetTokenExpiryDate; + +public class UserPasswordResetManagerImpl extends ManagerBase implements UserPasswordResetManager, Configurable { + + @Inject + private AccountManager accountManager; + + @Inject + private UserDetailsDao userDetailsDao; + + @Inject + private UserDao userDao; + + private SMTPMailSender mailSender; + + public static ConfigKey PasswordResetMailTemplate = + new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, String.class, + "user.password.reset.mail.template", "Hello 
{{username}}!\n" + + "You have requested to reset your password. Please click the following link to reset your password:\n" + + "http://{{{resetLink}}}\n" + + "If you did not request a password reset, please ignore this email.\n" + + "\n" + + "Regards,\n" + + "The CloudStack Team", + "Password reset mail template. This uses mustache template engine. Available " + + "variables are: username, firstName, lastName, resetLink, token", + true, + ConfigKey.Scope.Global); + + @Override + public String getConfigComponentName() { + return UserPasswordResetManagerImpl.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ + UserPasswordResetEnabled, + UserPasswordResetTtl, + UserPasswordResetEmailSender, + UserPasswordResetSMTPHost, + UserPasswordResetSMTPPort, + UserPasswordResetSMTPUseAuth, + UserPasswordResetSMTPUsername, + UserPasswordResetSMTPPassword, + PasswordResetMailTemplate + }; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + String smtpHost = UserPasswordResetSMTPHost.value(); + Integer smtpPort = UserPasswordResetSMTPPort.value(); + Boolean useAuth = UserPasswordResetSMTPUseAuth.value(); + String username = UserPasswordResetSMTPUsername.value(); + String password = UserPasswordResetSMTPPassword.value(); + + if (!StringUtils.isEmpty(smtpHost) && smtpPort != null && smtpPort > 0) { + String namespace = "password.reset.smtp"; + + Map configs = new HashMap<>(); + + configs.put(getKey(namespace, SMTPMailSender.CONFIG_HOST), smtpHost); + configs.put(getKey(namespace, SMTPMailSender.CONFIG_PORT), smtpPort.toString()); + configs.put(getKey(namespace, SMTPMailSender.CONFIG_USE_AUTH), useAuth.toString()); + configs.put(getKey(namespace, SMTPMailSender.CONFIG_USERNAME), username); + configs.put(getKey(namespace, SMTPMailSender.CONFIG_PASSWORD), password); + + mailSender = new SMTPMailSender(configs, namespace); + } + return true; + } + + private String getKey(String 
namespace, String config) { + return String.format("%s.%s", namespace, config); + } + + + protected boolean validateExistingToken(UserAccount userAccount) { + + Map details = userDetailsDao.listDetailsKeyPairs(userAccount.getId()); + + String resetToken = details.get(PasswordResetToken); + String resetTokenExpiryTimeString = details.getOrDefault(PasswordResetTokenExpiryDate, "0"); + + + if (StringUtils.isNotEmpty(resetToken) && StringUtils.isNotEmpty(resetTokenExpiryTimeString)) { + final Date resetTokenExpiryTime = new Date(Long.parseLong(resetTokenExpiryTimeString)); + final Date currentTime = new Date(); + if (currentTime.after(resetTokenExpiryTime)) { + return true; + } + } else if (StringUtils.isEmpty(resetToken)) { + return true; + } + return false; + } + + public void setResetTokenAndSend(UserAccount userAccount) { + if (mailSender == null) { + logger.debug("Failed to reset token and send email. SMTP mail sender is not configured."); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, + "Failed to reset token and send email. SMTP mail sender is not configured"); + } + + if (!validateExistingToken(userAccount)) { + logger.debug(String.format( + "Failed to reset token and send email. 
Password reset token is already set for user %s in " + + "domain id: %s with account %s and email %s", + userAccount.getUsername(), userAccount.getDomainId(), + userAccount.getAccountName(), userAccount.getEmail())); + return; + } + + final String resetToken = UUID.randomUUID().toString(); + final Date resetTokenExpiryTime = new Date(System.currentTimeMillis() + UserPasswordResetTtl.value() * 60 * 1000); + + userDetailsDao.addDetail(userAccount.getId(), PasswordResetToken, resetToken, false); + userDetailsDao.addDetail(userAccount.getId(), PasswordResetTokenExpiryDate, String.valueOf(resetTokenExpiryTime.getTime()), false); + + final String email = userAccount.getEmail(); + final String username = userAccount.getUsername(); + final String subject = "Password Reset Request"; + + String resetLink = String.format("%s/client/#/user/resetPassword?username=%s&token=%s", + ManagementServerAddresses.value().split(",")[0], username, resetToken); + String content = getMessageBody(userAccount, resetToken, resetLink); + + SMTPMailProperties mailProperties = new SMTPMailProperties(); + + mailProperties.setSender(new MailAddress(UserPasswordResetEmailSender.value())); + mailProperties.setSubject(subject); + mailProperties.setContent(content); + mailProperties.setContentType("text/html; charset=utf-8"); + + Set addresses = new HashSet<>(); + + addresses.add(new MailAddress(email)); + + mailProperties.setRecipients(addresses); + + mailSender.sendMail(mailProperties); + logger.debug(String.format( + "User password reset email for user id: %d username: %s account id: %d" + + " domain id:%d sent to %s with token expiry at %s", + userAccount.getId(), username, userAccount.getAccountId(), + userAccount.getDomainId(), email, resetTokenExpiryTime)); + } + + @Override + public boolean validateAndResetPassword(UserAccount user, String token, String password) { + UserDetailVO resetTokenDetail = userDetailsDao.findDetail(user.getId(), PasswordResetToken); + UserDetailVO resetTokenExpiryDate 
= userDetailsDao.findDetail(user.getId(), PasswordResetTokenExpiryDate); + + if (resetTokenDetail == null || resetTokenExpiryDate == null) { + logger.debug(String.format( + "Failed to reset password. No reset token found for user id: %d username: %s account" + + " id: %d domain id: %d", + user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("No reset token found for user %s", user.getUsername())); + } + + Date resetTokenExpiryTime = new Date(Long.parseLong(resetTokenExpiryDate.getValue())); + + Date now = new Date(); + String resetToken = resetTokenDetail.getValue(); + if (StringUtils.isEmpty(resetToken)) { + logger.debug(String.format( + "Failed to reset password. No reset token found for user id: %d username: %s account" + + " id: %d domain id: %d", + user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("No reset token found for user %s", user.getUsername())); + } + if (!resetToken.equals(token)) { + logger.debug(String.format( + "Failed to reset password. Invalid reset token for user id: %d username: %s " + + "account id: %d domain id: %d", + user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Invalid reset token for user %s", user.getUsername())); + } + if (now.after(resetTokenExpiryTime)) { + logger.debug(String.format( + "Failed to reset password. 
Reset token has expired for user id: %d username: %s " + + "account id: %d domain id: %d", + user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Reset token has expired for user %s", user.getUsername())); + } + + resetPassword(user, password); + logger.debug(String.format( + "Password reset successful for user id: %d username: %s account id: %d domain id: %d", + user.getId(), user.getUsername(), user.getAccountId(), user.getDomainId())); + return true; + } + + + void resetPassword(UserAccount userAccount, String password) { + UserVO user = userDao.getUser(userAccount.getId()); + + accountManager.validateUserPasswordAndUpdateIfNeeded(password, user, "", true); + + userDetailsDao.removeDetail(userAccount.getId(), PasswordResetToken); + userDetailsDao.removeDetail(userAccount.getId(), PasswordResetTokenExpiryDate); + + userDao.persist(user); + } + + String getMessageBody(UserAccount userAccount, String token, String resetLink) { + MustacheFactory mf = new DefaultMustacheFactory(); + Mustache mustache = mf.compile(new StringReader(PasswordResetMailTemplate.value()), "password.reset.mail"); + StringWriter writer = new StringWriter(); + + PasswordResetMail values = new PasswordResetMail(userAccount.getUsername(), userAccount.getFirstname(), userAccount.getLastname(), resetLink, token); + + try { + mustache.execute(writer, values).flush(); + } catch (IOException e) { + throw new RuntimeException(e); + } + return writer.toString(); + + } + + static class PasswordResetMail { + private String username; + private String firstName; + private String lastName; + private String resetLink; + private String token; + + + public PasswordResetMail(String username, String firstName, String lastName, String resetLink, String token) { + this.username = username; + this.firstName = firstName; + this.lastName = lastName; + this.resetLink = resetLink; + this.token = token; + } + + public String 
getUsername() { + return username; + } + + public String getFirstName() { + return firstName; + } + + public String getLastName() { + return lastName; + } + + public String getResetLink() { + return resetLink; + } + + public String getToken() { + return token; + } + } +} diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index f2b552c7c07..fc982668e73 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -19,6 +19,8 @@ package org.apache.cloudstack.vm; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckConvertInstanceAnswer; +import com.cloud.agent.api.CheckConvertInstanceCommand; import com.cloud.agent.api.CheckVolumeAnswer; import com.cloud.agent.api.CheckVolumeCommand; import com.cloud.agent.api.ConvertInstanceAnswer; @@ -95,7 +97,6 @@ import com.cloud.storage.ScopeType; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; -import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; @@ -447,7 +448,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return true; } hostDao.loadHostTags(host); - return host.checkHostServiceOfferingAndTemplateTags(serviceOffering, template); + return host.checkHostServiceOfferingAndTemplateTags(serviceOffering, template, UserVmManager.getStrictHostTags()); } private boolean storagePoolSupportsDiskOffering(StoragePool pool, DiskOffering diskOffering) { @@ -689,7 +690,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { // Check for duplicate hostname in network, get all vms hostNames in the network List hostNames = 
vmDao.listDistinctHostNames(network.getId()); if (CollectionUtils.isNotEmpty(hostNames) && hostNames.contains(hostName)) { - throw new InvalidParameterValueException(String.format("VM with Name [%s] already exists in the network [%s] domain [%s]. Cannot import another VM with the same name. Pleasy try again with a different name.", hostName, network, network.getNetworkDomain())); + throw new InvalidParameterValueException(String.format("VM with Name [%s] already exists in the network [%s] domain [%s]. Cannot import another VM with the same name. Please try again with a different name.", hostName, network, network.getNetworkDomain())); } } @@ -796,13 +797,20 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } } copyRemoteVolumeCommand.setTempPath(tmpPath); + int copyTimeout = UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.value(); + if (copyTimeout <= 0) { + copyTimeout = Integer.valueOf(UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.defaultValue()); + } + int copyTimeoutInSecs = copyTimeout * 60; + copyRemoteVolumeCommand.setWait(copyTimeoutInSecs); + logger.error(String.format("Initiating copy remote volume %s from %s, timeout %d secs", path, remoteUrl, copyTimeoutInSecs)); Answer answer = agentManager.easySend(dest.getHost().getId(), copyRemoteVolumeCommand); if (!(answer instanceof CopyRemoteVolumeAnswer)) { - throw new CloudRuntimeException("Error while copying volume"); + throw new CloudRuntimeException("Error while copying volume of remote instance: " + answer.getDetails()); } CopyRemoteVolumeAnswer copyRemoteVolumeAnswer = (CopyRemoteVolumeAnswer) answer; if(!copyRemoteVolumeAnswer.getResult()) { - throw new CloudRuntimeException("Error while copying volume"); + throw new CloudRuntimeException("Unable to copy volume of remote instance"); } diskProfile.setSize(copyRemoteVolumeAnswer.getSize()); DiskProfile profile = volumeManager.updateImportedVolume(type, diskOffering, vm, template, deviceId, @@ -815,7 +823,6 @@ public 
class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { Volume.Type type, VirtualMachineTemplate template, Long deviceId, Long hostId, String diskPath, DiskProfile diskProfile) { List storagePools = primaryDataStoreDao.findLocalStoragePoolsByHostAndTags(hostId, null); - if(storagePools.size() < 1) { throw new CloudRuntimeException("Local Storage not found for host"); } @@ -828,7 +835,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return new Pair<>(profile, storagePool); } - private Pair importKVMSharedDisk(VirtualMachine vm, DiskOffering diskOffering, Volume.Type type, VirtualMachineTemplate template, Long deviceId, Long poolId, String diskPath, DiskProfile diskProfile) { @@ -840,7 +846,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return new Pair<>(profile, storagePool); } - private Pair importDisk(UnmanagedInstanceTO.Disk disk, VirtualMachine vm, Cluster cluster, DiskOffering diskOffering, Volume.Type type, String name, Long diskSize, Long minIops, Long maxIops, VirtualMachineTemplate template, Account owner, Long deviceId) { @@ -1560,17 +1565,30 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return userVm; } - private UnmanagedInstanceTO cloneSourceVmwareUnmanagedInstance(String vcenter, String datacenterName, String username, String password, String clusterName, String sourceHostName, String sourceVM) { + private Pair getSourceVmwareUnmanagedInstance(String vcenter, String datacenterName, String username, + String password, String clusterName, String sourceHostName, + String sourceVM) { HypervisorGuru vmwareGuru = hypervisorGuruManager.getGuru(Hypervisor.HypervisorType.VMware); Map params = createParamsForTemplateFromVmwareVmMigration(vcenter, datacenterName, username, password, clusterName, sourceHostName, sourceVM); - return vmwareGuru.cloneHypervisorVMOutOfBand(sourceHostName, sourceVM, params); + return 
vmwareGuru.getHypervisorVMOutOfBandAndCloneIfRequired(sourceHostName, sourceVM, params); + } + + private String createOvfTemplateOfSourceVmwareUnmanagedInstance(String vcenter, String datacenterName, String username, + String password, String clusterName, String sourceHostName, + String sourceVMwareInstanceName, DataStoreTO convertLocation, int threadsCountToExportOvf) { + HypervisorGuru vmwareGuru = hypervisorGuruManager.getGuru(Hypervisor.HypervisorType.VMware); + + Map params = createParamsForTemplateFromVmwareVmMigration(vcenter, datacenterName, + username, password, clusterName, sourceHostName, sourceVMwareInstanceName); + + return vmwareGuru.createVMTemplateOutOfBand(sourceHostName, sourceVMwareInstanceName, params, convertLocation, threadsCountToExportOvf); } protected UserVm importUnmanagedInstanceFromVmwareToKvm(DataCenter zone, Cluster destinationCluster, VMTemplateVO template, - String sourceVM, String displayName, String hostName, + String sourceVMName, String displayName, String hostName, Account caller, Account owner, long userId, ServiceOfferingVO serviceOffering, Map dataDiskOfferingMap, Map nicNetworkMap, Map nicIpAddressMap, @@ -1597,7 +1615,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { if (existingVcenterId != null) { VmwareDatacenterVO existingDC = vmwareDatacenterDao.findById(existingVcenterId); if (existingDC == null) { - String err = String.format("Cannot find any existing Vmware DC with ID %s", existingVcenterId); + String err = String.format("Cannot find any existing VMware DC with ID %s", existingVcenterId); logger.error(err); throw new CloudRuntimeException(err); } @@ -1607,21 +1625,52 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { password = existingDC.getPassword(); } - UnmanagedInstanceTO clonedInstance = null; + boolean isClonedInstance = false; + UnmanagedInstanceTO sourceVMwareInstance = null; + DataStoreTO temporaryConvertLocation = null; + String ovfTemplateOnConvertLocation = 
null; try { + HostVO convertHost = selectInstanceConversionKVMHostInCluster(destinationCluster, convertInstanceHostId); + CheckConvertInstanceAnswer conversionSupportAnswer = checkConversionSupportOnHost(convertHost, sourceVMName, false); + logger.debug(String.format("The host %s (%s) is selected to execute the conversion of the instance %s" + + " from VMware to KVM ", convertHost.getId(), convertHost.getName(), sourceVMName)); + + temporaryConvertLocation = selectInstanceConversionTemporaryLocation(destinationCluster, convertStoragePoolId); + List convertStoragePools = findInstanceConversionStoragePoolsInCluster(destinationCluster); + long importStartTime = System.currentTimeMillis(); + Pair sourceInstanceDetails = getSourceVmwareUnmanagedInstance(vcenter, datacenterName, username, password, clusterName, sourceHostName, sourceVMName); + sourceVMwareInstance = sourceInstanceDetails.first(); + isClonedInstance = sourceInstanceDetails.second(); + boolean isWindowsVm = sourceVMwareInstance.getOperatingSystem().toLowerCase().contains("windows"); + if (isWindowsVm) { + checkConversionSupportOnHost(convertHost, sourceVMName, true); + } + String instanceName = getGeneratedInstanceName(owner); - clonedInstance = cloneSourceVmwareUnmanagedInstance(vcenter, datacenterName, username, password, - clusterName, sourceHostName, sourceVM); - checkNetworkingBeforeConvertingVmwareInstance(zone, owner, instanceName, hostName, clonedInstance, nicNetworkMap, nicIpAddressMap, forced); - UnmanagedInstanceTO convertedInstance = convertVmwareInstanceToKVM(vcenter, datacenterName, clusterName, username, password, - sourceHostName, clonedInstance, destinationCluster, convertInstanceHostId, convertStoragePoolId); - sanitizeConvertedInstance(convertedInstance, clonedInstance); + checkNetworkingBeforeConvertingVmwareInstance(zone, owner, instanceName, hostName, sourceVMwareInstance, nicNetworkMap, nicIpAddressMap, forced); + UnmanagedInstanceTO convertedInstance; + if 
(cmd.getForceMsToImportVmFiles() || !conversionSupportAnswer.isOvfExportSupported()) { + // Uses MS for OVF export to temporary conversion location + int noOfThreads = UnmanagedVMsManager.ThreadsOnMSToImportVMwareVMFiles.value(); + ovfTemplateOnConvertLocation = createOvfTemplateOfSourceVmwareUnmanagedInstance(vcenter, datacenterName, username, password, + clusterName, sourceHostName, sourceVMwareInstance.getName(), temporaryConvertLocation, noOfThreads); + convertedInstance = convertVmwareInstanceToKVMWithOVFOnConvertLocation(sourceVMName, sourceVMwareInstance, convertHost, convertStoragePools, + temporaryConvertLocation, ovfTemplateOnConvertLocation); + } else { + // Uses KVM Host for OVF export to temporary conversion location, through ovftool + convertedInstance = convertVmwareInstanceToKVMAfterExportingOVFToConvertLocation(sourceVMName, sourceVMwareInstance, convertHost, convertStoragePools, + temporaryConvertLocation, vcenter, username, password, datacenterName); + } + + sanitizeConvertedInstance(convertedInstance, sourceVMwareInstance); UserVm userVm = importVirtualMachineInternal(convertedInstance, instanceName, zone, destinationCluster, null, template, displayName, hostName, caller, owner, userId, serviceOffering, dataDiskOfferingMap, nicNetworkMap, nicIpAddressMap, details, false, forced, false); - logger.debug(String.format("VM %s imported successfully", sourceVM)); + long timeElapsedInSecs = (System.currentTimeMillis() - importStartTime) / 1000; + logger.debug(String.format("VMware VM %s imported successfully to CloudStack instance %s (%s), Time taken: %d secs, OVF files imported from %s, Source VMware VM details - OS: %s, PowerState: %s, Disks: %s, NICs: %s", + sourceVMName, instanceName, displayName, timeElapsedInSecs, (ovfTemplateOnConvertLocation != null)? 
"MS" : "KVM Host", sourceVMwareInstance.getOperatingSystem(), sourceVMwareInstance.getPowerState(), sourceVMwareInstance.getDisks(), sourceVMwareInstance.getNics())); return userVm; } catch (CloudRuntimeException e) { logger.error(String.format("Error importing VM: %s", e.getMessage()), e); @@ -1629,20 +1678,25 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { cmd.getEventDescription(), null, null, 0); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } finally { - removeClonedInstance(vcenter, datacenterName, username, password, sourceHostName, clonedInstance.getName(), sourceVM); + if (isClonedInstance && sourceVMwareInstance != null) { + removeClonedInstance(vcenter, datacenterName, username, password, sourceHostName, sourceVMwareInstance.getName(), sourceVMName); + } + if (temporaryConvertLocation != null && StringUtils.isNotBlank(ovfTemplateOnConvertLocation)) { + removeTemplate(temporaryConvertLocation, ovfTemplateOnConvertLocation); + } } } private void checkNetworkingBeforeConvertingVmwareInstance(DataCenter zone, Account owner, String instanceName, - String hostName, UnmanagedInstanceTO clonedInstance, + String hostName, UnmanagedInstanceTO sourceVMwareInstance, Map nicNetworkMap, Map nicIpAddressMap, boolean forced) { - List nics = clonedInstance.getNics(); + List nics = sourceVMwareInstance.getNics(); List networkIds = new ArrayList<>(nicNetworkMap.values()); if (nics.size() != networkIds.size()) { String msg = String.format("Different number of nics found on instance %s: %s vs %s nics provided", - clonedInstance.getName(), nics.size(), networkIds.size()); + sourceVMwareInstance.getName(), nics.size(), networkIds.size()); logger.error(msg); throw new CloudRuntimeException(msg); } @@ -1670,8 +1724,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { private void checkUnmanagedNicAndNetworkMacAddressForImport(NetworkVO network, UnmanagedInstanceTO.Nic nic, boolean forced) { NicVO 
existingNic = nicDao.findByNetworkIdAndMacAddress(network.getId(), nic.getMacAddress()); if (existingNic != null && !forced) { - String err = String.format("NIC with MAC address = %s exists on network with ID = %s and forced flag is disabled", - nic.getMacAddress(), network.getId()); + String err = String.format("NIC with MAC address %s already exists on network with ID %s and forced flag is disabled. " + + "Retry with forced flag enabled if a new MAC address to be generated.", nic.getMacAddress(), network.getUuid()); logger.error(err); throw new CloudRuntimeException(err); } @@ -1686,38 +1740,44 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return VirtualMachineName.getVmName(id, owner.getId(), instanceSuffix); } - private void sanitizeConvertedInstance(UnmanagedInstanceTO convertedInstance, UnmanagedInstanceTO clonedInstance) { - convertedInstance.setCpuCores(clonedInstance.getCpuCores()); - convertedInstance.setCpuSpeed(clonedInstance.getCpuSpeed()); - convertedInstance.setCpuCoresPerSocket(clonedInstance.getCpuCoresPerSocket()); - convertedInstance.setMemory(clonedInstance.getMemory()); + private void sanitizeConvertedInstance(UnmanagedInstanceTO convertedInstance, UnmanagedInstanceTO sourceVMwareInstance) { + convertedInstance.setCpuCores(sourceVMwareInstance.getCpuCores()); + convertedInstance.setCpuSpeed(sourceVMwareInstance.getCpuSpeed()); + convertedInstance.setCpuCoresPerSocket(sourceVMwareInstance.getCpuCoresPerSocket()); + convertedInstance.setMemory(sourceVMwareInstance.getMemory()); convertedInstance.setPowerState(UnmanagedInstanceTO.PowerState.PowerOff); List convertedInstanceDisks = convertedInstance.getDisks(); - List clonedInstanceDisks = clonedInstance.getDisks(); + List sourceVMwareInstanceDisks = sourceVMwareInstance.getDisks(); for (int i = 0; i < convertedInstanceDisks.size(); i++) { UnmanagedInstanceTO.Disk disk = convertedInstanceDisks.get(i); - disk.setDiskId(clonedInstanceDisks.get(i).getDiskId()); + 
disk.setDiskId(sourceVMwareInstanceDisks.get(i).getDiskId()); } List convertedInstanceNics = convertedInstance.getNics(); - List clonedInstanceNics = clonedInstance.getNics(); - if (CollectionUtils.isEmpty(convertedInstanceNics) && CollectionUtils.isNotEmpty(clonedInstanceNics)) { - for (UnmanagedInstanceTO.Nic nic : clonedInstanceNics) { + List sourceVMwareInstanceNics = sourceVMwareInstance.getNics(); + if (CollectionUtils.isEmpty(convertedInstanceNics) && CollectionUtils.isNotEmpty(sourceVMwareInstanceNics)) { + for (UnmanagedInstanceTO.Nic nic : sourceVMwareInstanceNics) { // In case the NICs information is not parsed from the converted XML domain, use the cloned instance NICs with virtio adapter nic.setAdapterType("virtio"); } - convertedInstance.setNics(clonedInstanceNics); - } else { + convertedInstance.setNics(sourceVMwareInstanceNics); for (int i = 0; i < convertedInstanceNics.size(); i++) { UnmanagedInstanceTO.Nic nic = convertedInstanceNics.get(i); - nic.setNicId(clonedInstanceNics.get(i).getNicId()); + nic.setNicId(sourceVMwareInstanceNics.get(i).getNicId()); + } + } else if (CollectionUtils.isNotEmpty(convertedInstanceNics) && CollectionUtils.isNotEmpty(sourceVMwareInstanceNics) + && convertedInstanceNics.size() == sourceVMwareInstanceNics.size()) { + for (int i = 0; i < convertedInstanceNics.size(); i++) { + UnmanagedInstanceTO.Nic nic = convertedInstanceNics.get(i); + nic.setNicId(sourceVMwareInstanceNics.get(i).getNicId()); + if (nic.getMacAddress() == null) { + nic.setMacAddress(sourceVMwareInstanceNics.get(i).getMacAddress()); + } } } } - private void removeClonedInstance(String vcenter, String datacenterName, - String username, String password, - String sourceHostName, String clonedInstanceName, - String sourceVM) { + private void removeClonedInstance(String vcenter, String datacenterName, String username, String password, + String sourceHostName, String clonedInstanceName, String sourceVM) { HypervisorGuru vmwareGuru = 
hypervisorGuruManager.getGuru(Hypervisor.HypervisorType.VMware); Map params = createParamsForRemoveClonedInstance(vcenter, datacenterName, username, password, sourceVM); boolean result = vmwareGuru.removeClonedHypervisorVMOutOfBand(sourceHostName, clonedInstanceName, params); @@ -1727,10 +1787,23 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { logger.warn(msg); return; } - logger.debug(String.format("Removed the cloned instance %s from VMWare datacenter %s:%s", + logger.debug(String.format("Removed the cloned instance %s from VMWare datacenter %s/%s", clonedInstanceName, vcenter, datacenterName)); } + private void removeTemplate(DataStoreTO convertLocation, String ovfTemplateOnConvertLocation) { + HypervisorGuru vmwareGuru = hypervisorGuruManager.getGuru(Hypervisor.HypervisorType.VMware); + boolean result = vmwareGuru.removeVMTemplateOutOfBand(convertLocation, ovfTemplateOnConvertLocation); + if (!result) { + String msg = String.format("Could not remove the template file %s on datastore %s", + ovfTemplateOnConvertLocation, convertLocation.getUrl()); + logger.warn(msg); + return; + } + logger.debug(String.format("Removed the template file %s on datastore %s", + ovfTemplateOnConvertLocation, convertLocation.getUrl())); + } + private Map createParamsForRemoveClonedInstance(String vcenter, String datacenterName, String username, String password, String sourceVM) { Map params = new HashMap<>(); @@ -1741,7 +1814,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return params; } - private HostVO selectInstanceConvertionKVMHostInCluster(Cluster destinationCluster, Long convertInstanceHostId) { + private HostVO selectInstanceConversionKVMHostInCluster(Cluster destinationCluster, Long convertInstanceHostId) { if (convertInstanceHostId != null) { HostVO selectedHost = hostDao.findById(convertInstanceHostId); if (selectedHost == null) { @@ -1758,41 +1831,61 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager 
{ } return selectedHost; } - List hosts = hostDao.listByClusterAndHypervisorType(destinationCluster.getId(), destinationCluster.getHypervisorType()); - if (CollectionUtils.isEmpty(hosts)) { - String err = String.format("Could not find any running %s host in cluster %s", - destinationCluster.getHypervisorType(), destinationCluster.getName()); - logger.error(err); - throw new CloudRuntimeException(err); + // Auto select host with conversion capability + List hosts = hostDao.listByClusterHypervisorTypeAndHostCapability(destinationCluster.getId(), destinationCluster.getHypervisorType(), Host.HOST_INSTANCE_CONVERSION); + if (CollectionUtils.isNotEmpty(hosts)) { + return hosts.get(new Random().nextInt(hosts.size())); } - List filteredHosts = hosts.stream() - .filter(x -> x.getResourceState() == ResourceState.Enabled) - .collect(Collectors.toList()); - if (CollectionUtils.isEmpty(filteredHosts)) { - String err = String.format("Could not find a %s host in cluster %s to perform the instance conversion", - destinationCluster.getHypervisorType(), destinationCluster.getName()); - logger.error(err); - throw new CloudRuntimeException(err); + + // Try without host capability check + hosts = hostDao.listByClusterAndHypervisorType(destinationCluster.getId(), destinationCluster.getHypervisorType()); + if (CollectionUtils.isNotEmpty(hosts)) { + return hosts.get(new Random().nextInt(hosts.size())); } - return filteredHosts.get(new Random().nextInt(filteredHosts.size())); + + String err = String.format("Could not find any suitable %s host in cluster %s to perform the instance conversion", + destinationCluster.getHypervisorType(), destinationCluster.getName()); + logger.error(err); + throw new CloudRuntimeException(err); } - private UnmanagedInstanceTO convertVmwareInstanceToKVM(String vcenter, String datacenterName, String clusterName, - String username, String password, String hostName, - UnmanagedInstanceTO clonedInstance, Cluster destinationCluster, - Long convertInstanceHostId, 
Long convertStoragePoolId) { - HostVO convertHost = selectInstanceConvertionKVMHostInCluster(destinationCluster, convertInstanceHostId); - String vmName = clonedInstance.getName(); - logger.debug(String.format("The host %s (%s) is selected to execute the conversion of the instance %s" + - " from VMware to KVM ", convertHost.getId(), convertHost.getName(), vmName)); + private CheckConvertInstanceAnswer checkConversionSupportOnHost(HostVO convertHost, String sourceVM, boolean checkWindowsGuestConversionSupport) { + logger.debug(String.format("Checking the %s conversion support on the host %s (%s)", checkWindowsGuestConversionSupport? "windows guest" : "", convertHost.getId(), convertHost.getName())); + CheckConvertInstanceCommand cmd = new CheckConvertInstanceCommand(checkWindowsGuestConversionSupport); + int timeoutSeconds = 60; + cmd.setWait(timeoutSeconds); - RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(hostName, vmName, - vcenter, datacenterName, clusterName, username, password); - DataStoreTO temporaryConvertLocation = selectInstanceConversionTemporaryLocation(destinationCluster, convertStoragePoolId, convertHost); - List destinationStoragePools = selectInstanceConvertionStoragePools(destinationCluster, clonedInstance.getDisks()); + CheckConvertInstanceAnswer checkConvertInstanceAnswer; + try { + checkConvertInstanceAnswer = (CheckConvertInstanceAnswer) agentManager.send(convertHost.getId(), cmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + String err = String.format("Failed to check %s conversion support on the host %s for converting instance %s from VMware to KVM due to: %s", + checkWindowsGuestConversionSupport? 
"windows guest" : "", convertHost.getName(), sourceVM, e.getMessage()); + logger.error(err); + throw new CloudRuntimeException(err); + } + + if (!checkConvertInstanceAnswer.getResult()) { + String err = String.format("The host %s doesn't support conversion of instance %s from VMware to KVM due to: %s", + convertHost.getName(), sourceVM, checkConvertInstanceAnswer.getDetails()); + logger.error(err); + throw new CloudRuntimeException(err); + } + + return checkConvertInstanceAnswer; + } + + private UnmanagedInstanceTO convertVmwareInstanceToKVMWithOVFOnConvertLocation(String sourceVM, UnmanagedInstanceTO sourceVMwareInstance, HostVO convertHost, + List convertStoragePools, DataStoreTO temporaryConvertLocation, + String ovfTemplateDirConvertLocation) { + logger.debug(String.format("Delegating the conversion of instance %s from VMware to KVM to the host %s (%s) using OVF %s on conversion datastore", + sourceVM, convertHost.getId(), convertHost.getName(), ovfTemplateDirConvertLocation)); + + RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVM); + List destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks()); ConvertInstanceCommand cmd = new ConvertInstanceCommand(remoteInstanceTO, - Hypervisor.HypervisorType.KVM, destinationStoragePools, temporaryConvertLocation); - int timeoutSeconds = StorageManager.ConvertVmwareInstanceToKvmTimeout.value() * 60 * 60; + Hypervisor.HypervisorType.KVM, destinationStoragePools, temporaryConvertLocation, ovfTemplateDirConvertLocation, false, false); + int timeoutSeconds = UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout.value() * 60 * 60; cmd.setWait(timeoutSeconds); Answer convertAnswer; @@ -1806,17 +1899,68 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } if (!convertAnswer.getResult()) { - String err = String.format("The convert process failed for instance %s from Vmware to KVM on host %s: %s", - vmName, convertHost.getName(), 
convertAnswer.getDetails()); + String err = String.format("The convert process failed for instance %s from VMware to KVM on host %s: %s", + sourceVM, convertHost.getName(), convertAnswer.getDetails()); logger.error(err); throw new CloudRuntimeException(err); } return ((ConvertInstanceAnswer) convertAnswer).getConvertedInstance(); } - private List selectInstanceConvertionStoragePools(Cluster destinationCluster, List disks) { + private UnmanagedInstanceTO convertVmwareInstanceToKVMAfterExportingOVFToConvertLocation(String sourceVM, UnmanagedInstanceTO sourceVMwareInstance, HostVO convertHost, + List convertStoragePools, DataStoreTO temporaryConvertLocation, + String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) { + logger.debug(String.format("Delegating the conversion of instance %s from VMware to KVM to the host %s (%s) after OVF export through ovftool", + sourceVM, convertHost.getId(), convertHost.getName())); + + RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVMwareInstance.getName(), vcenterHost, vcenterUsername, vcenterPassword, datacenterName); + List destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks()); + ConvertInstanceCommand cmd = new ConvertInstanceCommand(remoteInstanceTO, + Hypervisor.HypervisorType.KVM, destinationStoragePools, temporaryConvertLocation, null, false, true); + int timeoutSeconds = UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout.value() * 60 * 60; + cmd.setWait(timeoutSeconds); + int noOfThreads = UnmanagedVMsManager.ThreadsOnKVMHostToImportVMwareVMFiles.value(); + if (noOfThreads == 0) { + // Use no. 
of threads as the disks count + noOfThreads = sourceVMwareInstance.getDisks().size(); + } + cmd.setThreadsCountToExportOvf(noOfThreads); + + Answer convertAnswer; + try { + convertAnswer = agentManager.send(convertHost.getId(), cmd); + } catch (AgentUnavailableException | OperationTimedoutException e) { + String err = String.format("Could not send the convert instance command to host %s (%s) due to: %s", + convertHost.getId(), convertHost.getName(), e.getMessage()); + logger.error(err, e); + throw new CloudRuntimeException(err); + } + + if (!convertAnswer.getResult()) { + String err = String.format("The convert process failed for instance %s from VMware to KVM on host %s: %s", + sourceVM, convertHost.getName(), convertAnswer.getDetails()); + logger.error(err); + throw new CloudRuntimeException(err); + } + return ((ConvertInstanceAnswer) convertAnswer).getConvertedInstance(); + } + + private List findInstanceConversionStoragePoolsInCluster(Cluster destinationCluster) { + List pools = new ArrayList<>(); + List clusterPools = primaryDataStoreDao.findClusterWideStoragePoolsByHypervisorAndPoolType(destinationCluster.getId(), Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem); + pools.addAll(clusterPools); + List zonePools = primaryDataStoreDao.findZoneWideStoragePoolsByHypervisorAndPoolType(destinationCluster.getDataCenterId(), Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem); + pools.addAll(zonePools); + if (pools.isEmpty()) { + String msg = String.format("Cannot find suitable storage pools in cluster %s for the conversion", destinationCluster.getName()); + logger.error(msg); + throw new CloudRuntimeException(msg); + } + return pools; + } + + private List selectInstanceConversionStoragePools(List pools, List disks) { List storagePools = new ArrayList<>(disks.size()); - List pools = primaryDataStoreDao.listPoolsByCluster(destinationCluster.getId()); //TODO: Choose pools by capacity for (UnmanagedInstanceTO.Disk disk : 
disks) { Long capacity = disk.getCapacity(); @@ -1830,7 +1974,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new CloudRuntimeException(msg); } - protected DataStoreTO selectInstanceConversionTemporaryLocation(Cluster destinationCluster, Long convertStoragePoolId, HostVO convertHost) { + protected DataStoreTO selectInstanceConversionTemporaryLocation(Cluster destinationCluster, Long convertStoragePoolId) { if (convertStoragePoolId != null) { StoragePoolVO selectedStoragePool = primaryDataStoreDao.findById(convertStoragePoolId); if (selectedStoragePool == null) { @@ -1841,11 +1985,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { logFailureAndThrowException(String.format("Cannot use the storage pool %s for the instance conversion as " + "it is not in the scope of the cluster %s", selectedStoragePool.getName(), destinationCluster.getName())); } - if (selectedStoragePool.getScope() == ScopeType.HOST && - storagePoolHostDao.findByPoolHost(selectedStoragePool.getId(), convertHost.getId()) == null) { - logFailureAndThrowException(String.format("The storage pool %s is not a local storage pool for the host %s", selectedStoragePool.getName(), convertHost.getName())); + if (selectedStoragePool.getScope() == ScopeType.HOST) { + logFailureAndThrowException(String.format("The storage pool %s is a local storage pool and not supported for temporary conversion location, cluster and zone wide NFS storage pools are supported", selectedStoragePool.getName())); } else if (selectedStoragePool.getPoolType() != Storage.StoragePoolType.NetworkFilesystem) { - logFailureAndThrowException(String.format("The storage pool %s is not supported for temporary conversion location, supported pools are NFS storage pools", selectedStoragePool.getName())); + logFailureAndThrowException(String.format("The storage pool %s is not supported for temporary conversion location, only NFS storage pools are supported", selectedStoragePool.getName())); } 
return dataStoreManager.getPrimaryDataStore(convertStoragePoolId).getTO(); } else { @@ -1964,6 +2107,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new UnsupportedServiceException("Unmanage VM is currently allowed for guest VMs only"); } + if (vmVO.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmId); + if (UserVmManager.SHAREDFSVM.equals(userVm.getUserVmType())) { + throw new InvalidParameterValueException("Operation not supported on Shared FileSystem Instance"); + } + } + performUnmanageVMInstancePrechecks(vmVO); Long hostId = findSuitableHostId(vmVO); @@ -2079,10 +2229,10 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new InvalidParameterValueException("Username need to be provided."); } - HashMap instancesMap = getRemoteVms(zoneId, remoteUrl, cmd.getUsername(), cmd.getPassword()); + HashMap instancesMap = getRemoteVmsOnKVMHost(zoneId, remoteUrl, cmd.getUsername(), cmd.getPassword()); unmanagedInstanceTO = instancesMap.get(cmd.getName()); if (unmanagedInstanceTO == null) { - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Vm with name: %s not found on remote host", instanceName)); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM with name: %s not found on remote host %s", instanceName, remoteUrl)); } } @@ -2368,7 +2518,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s. 
Suitable deployment destination not found", userVm.getInstanceName())); } - Map storage = dest.getStorageForDisks(); Volume volume = volumeDao.findById(diskProfile.getVolumeId()); StoragePool storagePool = storage.get(volume); @@ -2388,7 +2537,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } diskProfile.setSize(checkVolumeAnswer.getSize()); - List> diskProfileStoragePoolList = new ArrayList<>(); try { long deviceId = 1L; @@ -2409,7 +2557,6 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return userVm; } - private NetworkVO getDefaultNetwork(DataCenter zone, Account owner, boolean selectAny) throws InsufficientCapacityException, ResourceAllocationException { NetworkVO defaultNetwork = null; @@ -2460,14 +2607,13 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account, null, null, null, null, true, null, null, - null, null, null, null, null, null, null, null); + null, null, null, null, null, null, null, null, null); if (newNetwork != null) { defaultNetwork = networkDao.findById(newNetwork.getId()); } return defaultNetwork; } - //generate unit test public ListResponse listVmsForImport(ListVmsForImportCmd cmd) { final Account caller = CallContext.current().getCallingAccount(); if (caller.getType() != Account.Type.ADMIN) { @@ -2479,11 +2625,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { throw new InvalidParameterValueException("Please specify a valid zone."); } final String hypervisorType = cmd.getHypervisor(); - if 
(Hypervisor.HypervisorType.KVM.toString().equalsIgnoreCase(hypervisorType)) { - if (StringUtils.isBlank(cmd.getUsername())) { - throw new InvalidParameterValueException("Username need to be provided."); - } - } else { + if (!Hypervisor.HypervisorType.KVM.toString().equalsIgnoreCase(hypervisorType)) { throw new InvalidParameterValueException(String.format("VM Import is currently not supported for hypervisor: %s", hypervisorType)); } @@ -2493,7 +2635,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { } List responses = new ArrayList<>(); - HashMap vmMap = getRemoteVms(zoneId, cmd.getHost(), cmd.getUsername(), cmd.getPassword()); + HashMap vmMap = getRemoteVmsOnKVMHost(zoneId, cmd.getHost(), cmd.getUsername(), cmd.getPassword()); for (String key : vmMap.keySet()) { UnmanagedInstanceTO instance = vmMap.get(key); if (StringUtils.isNotEmpty(keyword) && @@ -2508,17 +2650,17 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { return listResponses; } - private HashMap getRemoteVms(long zoneId, String remoteUrl, String username, String password) { + private HashMap getRemoteVmsOnKVMHost(long zoneId, String remoteHostUrl, String username, String password) { //ToDo: add option to list one Vm by name List hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); - if(hosts.size() < 1) { - throw new CloudRuntimeException("No hosts available for Vm Import"); + if (hosts.size() < 1) { + throw new CloudRuntimeException("No hosts available to list VMs on remote host " + remoteHostUrl); } HostVO host = hosts.get(0); - GetRemoteVmsCommand getRemoteVmsCommand = new GetRemoteVmsCommand(remoteUrl, username, password); + GetRemoteVmsCommand getRemoteVmsCommand = new GetRemoteVmsCommand(remoteHostUrl, username, password); Answer answer = agentManager.easySend(host.getId(), getRemoteVmsCommand); if (!(answer instanceof GetRemoteVmsAnswer)) { - throw new CloudRuntimeException("Error while listing 
remote Vms"); + throw new CloudRuntimeException("Failed to list VMs, due to: " + answer.getDetails()); } GetRemoteVmsAnswer getRemoteVmsAnswer = (GetRemoteVmsAnswer) answer; return getRemoteVmsAnswer.getUnmanagedInstances(); @@ -2540,6 +2682,12 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[]{UnmanageVMPreserveNic}; + return new ConfigKey[]{ + UnmanageVMPreserveNic, + RemoteKvmInstanceDisksCopyTimeout, + ConvertVmwareInstanceToKvmTimeout, + ThreadsOnMSToImportVMwareVMFiles, + ThreadsOnKVMHostToImportVMwareVMFiles + }; } } diff --git a/server/src/main/java/org/apache/cloudstack/webhook/WebhookHelper.java b/server/src/main/java/org/apache/cloudstack/webhook/WebhookHelper.java new file mode 100644 index 00000000000..4f2305004a9 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/webhook/WebhookHelper.java @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.webhook; + +import java.util.List; + +import org.apache.cloudstack.acl.ControlledEntity; + +public interface WebhookHelper { + void deleteWebhooksForAccount(long accountId); + + List listWebhooksByAccount(long accountId); +} diff --git a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml index b5f72a1763c..1bf921f625e 100644 --- a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -56,6 +56,7 @@
    + @@ -81,6 +82,8 @@ value="#{resourceDiscoverersRegistry.registered}" /> + + @@ -107,7 +110,7 @@ - + @@ -145,6 +148,10 @@ + + + + @@ -318,7 +325,7 @@ - + @@ -332,6 +339,8 @@ + + @@ -355,6 +364,7 @@ + @@ -364,4 +374,10 @@ + + + + + + diff --git a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-misc-context.xml b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-misc-context.xml index 244c2d913b9..c633a3b0abd 100644 --- a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-misc-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-misc-context.xml @@ -72,7 +72,7 @@ - + @@ -80,5 +80,5 @@ - + diff --git a/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-backend/spring-server-alert-adapter-backend-context.xml b/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-backend/spring-server-alert-adapter-backend-context.xml index a8f83b012bd..e43c25ad655 100644 --- a/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-backend/spring-server-alert-adapter-backend-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-backend/spring-server-alert-adapter-backend-context.xml @@ -28,5 +28,5 @@ > - + diff --git a/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-compute/spring-server-alert-adapter-compute-context.xml b/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-compute/spring-server-alert-adapter-compute-context.xml index 71cef6c4e1b..251c931e3a7 100644 --- a/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-compute/spring-server-alert-adapter-compute-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-compute/spring-server-alert-adapter-compute-context.xml @@ -28,5 +28,5 @@ > - + diff --git a/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-storage/spring-server-alert-adapter-storage-context.xml 
b/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-storage/spring-server-alert-adapter-storage-context.xml index d765063f014..e19b41512ef 100644 --- a/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-storage/spring-server-alert-adapter-storage-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/server-alert-adapter-storage/spring-server-alert-adapter-storage-context.xml @@ -28,5 +28,5 @@ > - + diff --git a/server/src/main/resources/META-INF/cloudstack/server-allocator/spring-server-allocator-context.xml b/server/src/main/resources/META-INF/cloudstack/server-allocator/spring-server-allocator-context.xml index 664e7de8c65..d3781649845 100644 --- a/server/src/main/resources/META-INF/cloudstack/server-allocator/spring-server-allocator-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/server-allocator/spring-server-allocator-context.xml @@ -44,5 +44,5 @@ - + diff --git a/server/src/main/resources/META-INF/cloudstack/server-fencer/spring-server-fencer-context.xml b/server/src/main/resources/META-INF/cloudstack/server-fencer/spring-server-fencer-context.xml index f11fea6eb2c..085cae52df3 100644 --- a/server/src/main/resources/META-INF/cloudstack/server-fencer/spring-server-fencer-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/server-fencer/spring-server-fencer-context.xml @@ -27,11 +27,11 @@ http://www.springframework.org/schema/context/spring-context.xsd" > - + - + diff --git a/server/src/main/resources/META-INF/cloudstack/server-investigator/spring-server-investigator-context.xml b/server/src/main/resources/META-INF/cloudstack/server-investigator/spring-server-investigator-context.xml index dd5ce62147e..112384c53e4 100644 --- a/server/src/main/resources/META-INF/cloudstack/server-investigator/spring-server-investigator-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/server-investigator/spring-server-investigator-context.xml @@ -42,5 +42,5 @@ - + diff --git 
a/server/src/main/resources/META-INF/cloudstack/server-template-adapter/spring-server-template-adapter-context.xml b/server/src/main/resources/META-INF/cloudstack/server-template-adapter/spring-server-template-adapter-context.xml index 96c3aa7f12c..b3df26b62a3 100644 --- a/server/src/main/resources/META-INF/cloudstack/server-template-adapter/spring-server-template-adapter-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/server-template-adapter/spring-server-template-adapter-context.xml @@ -28,5 +28,5 @@ > - + diff --git a/server/src/main/resources/com/cloud/upgrade/databaseCreatorContext.xml b/server/src/main/resources/com/cloud/upgrade/databaseCreatorContext.xml index ba00d19781c..6b0312d4498 100644 --- a/server/src/main/resources/com/cloud/upgrade/databaseCreatorContext.xml +++ b/server/src/main/resources/com/cloud/upgrade/databaseCreatorContext.xml @@ -17,13 +17,13 @@ under the License. --> getProjectAccessCheckResources() { + Account caller = Mockito.mock(Account.class); + Mockito.when(caller.getId()).thenReturn(100L); + Mockito.when(caller.getType()).thenReturn(Account.Type.PROJECT); + ControlledEntity entity = getMockedEntity(2L); + AccountVO projectAccount = Mockito.mock(AccountVO.class); + Mockito.when(projectAccount.getId()).thenReturn(2L); + Mockito.when(projectAccount.getType()).thenReturn(Account.Type.PROJECT); + return new Ternary<>(caller, entity, projectAccount); + } + + @Test + public void testProjectOwnerCanModify() { + Ternary resources = getProjectAccessCheckResources(); + Account caller = resources.first(); + ControlledEntity entity = resources.second(); + AccountVO projectAccount = resources.third(); + Mockito.when(_accountDao.findById(entity.getAccountId())).thenReturn(projectAccount); + Mockito.when(_projectMgr.canModifyProjectAccount(caller, projectAccount.getId())).thenReturn(true); + Mockito.doReturn(true).when(domainChecker).checkOperationPermitted(caller, entity); + + 
domainChecker.validateCallerHasAccessToEntityOwner(caller, entity, SecurityChecker.AccessType.ModifyProject); + } + + @Test(expected = PermissionDeniedException.class) + public void testProjectOwnerCannotModify() { + Ternary resources = getProjectAccessCheckResources(); + Account caller = resources.first(); + ControlledEntity entity = resources.second(); + AccountVO projectAccount = resources.third(); + Mockito.when(_accountDao.findById(entity.getAccountId())).thenReturn(projectAccount); + Mockito.when(_projectMgr.canModifyProjectAccount(caller, projectAccount.getId())).thenReturn(false); + + domainChecker.validateCallerHasAccessToEntityOwner(caller, entity, SecurityChecker.AccessType.ModifyProject); + } + + @Test + public void testProjectOwnerCanAccess() { + Ternary resources = getProjectAccessCheckResources(); + Account caller = resources.first(); + ControlledEntity entity = resources.second(); + AccountVO projectAccount = resources.third(); + Mockito.when(_accountDao.findById(entity.getAccountId())).thenReturn(projectAccount); + Mockito.when(_projectMgr.canAccessProjectAccount(caller, projectAccount.getId())).thenReturn(true); + Mockito.doReturn(true).when(domainChecker).checkOperationPermitted(caller, entity); + + domainChecker.validateCallerHasAccessToEntityOwner(caller, entity, SecurityChecker.AccessType.ListEntry); + } + + @Test(expected = PermissionDeniedException.class) + public void testProjectOwnerCannotAccess() { + Ternary resources = getProjectAccessCheckResources(); + Account caller = resources.first(); + ControlledEntity entity = resources.second(); + AccountVO projectAccount = resources.third(); + Mockito.when(_accountDao.findById(entity.getAccountId())).thenReturn(projectAccount); + Mockito.when(_projectMgr.canAccessProjectAccount(caller, projectAccount.getId())).thenReturn(false); + + domainChecker.validateCallerHasAccessToEntityOwner(caller, entity, SecurityChecker.AccessType.ListEntry); + } + +} diff --git 
a/server/src/test/java/com/cloud/api/ApiServerTest.java b/server/src/test/java/com/cloud/api/ApiServerTest.java new file mode 100644 index 00000000000..fed1d95a625 --- /dev/null +++ b/server/src/test/java/com/cloud/api/ApiServerTest.java @@ -0,0 +1,150 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.api; + +import com.cloud.domain.Domain; +import com.cloud.user.UserAccount; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.user.UserPasswordResetManager; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedConstruction; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.cloudstack.user.UserPasswordResetManager.UserPasswordResetEnabled; + +@RunWith(MockitoJUnitRunner.class) +public class ApiServerTest { + + @InjectMocks + ApiServer apiServer = new ApiServer(); + + @Mock + UserPasswordResetManager userPasswordResetManager; + + @BeforeClass + public static void beforeClass() throws Exception { + overrideDefaultConfigValue(UserPasswordResetEnabled, "_value", true); + } + + @AfterClass + public static void afterClass() throws Exception { + overrideDefaultConfigValue(UserPasswordResetEnabled, "_value", false); + } + + private static void overrideDefaultConfigValue(final ConfigKey configKey, final String name, final Object o) throws IllegalAccessException, NoSuchFieldException { + Field f = ConfigKey.class.getDeclaredField(name); + f.setAccessible(true); + f.set(configKey, o); + } + + private void runTestSetupIntegrationPortListenerInvalidPorts(Integer port) { + try (MockedConstruction mocked = + Mockito.mockConstruction(ApiServer.ListenerThread.class)) { + apiServer.setupIntegrationPortListener(port); + Assert.assertTrue(mocked.constructed().isEmpty()); + } + } + + @Test + public void testSetupIntegrationPortListenerInvalidPorts() { + List ports = new ArrayList<>(List.of(-1, -10, 0)); + ports.add(null); + for (Integer port : ports) { + 
runTestSetupIntegrationPortListenerInvalidPorts(port); + } + } + + @Test + public void testSetupIntegrationPortListenerValidPort() { + Integer validPort = 8080; + try (MockedConstruction mocked = + Mockito.mockConstruction(ApiServer.ListenerThread.class)) { + apiServer.setupIntegrationPortListener(validPort); + Assert.assertFalse(mocked.constructed().isEmpty()); + ApiServer.ListenerThread listenerThread = mocked.constructed().get(0); + Mockito.verify(listenerThread).start(); + } + } + + @Test + public void testForgotPasswordSuccess() { + UserAccount userAccount = Mockito.mock(UserAccount.class); + Domain domain = Mockito.mock(Domain.class); + + Mockito.when(userAccount.getEmail()).thenReturn("test@test.com"); + Mockito.when(userAccount.getState()).thenReturn("ENABLED"); + Mockito.when(userAccount.getAccountState()).thenReturn("ENABLED"); + Mockito.when(domain.getState()).thenReturn(Domain.State.Active); + Mockito.doNothing().when(userPasswordResetManager).setResetTokenAndSend(userAccount); + Assert.assertTrue(apiServer.forgotPassword(userAccount, domain)); + Mockito.verify(userPasswordResetManager).setResetTokenAndSend(userAccount); + } + + @Test(expected = CloudRuntimeException.class) + public void testForgotPasswordFailureNoEmail() { + UserAccount userAccount = Mockito.mock(UserAccount.class); + Domain domain = Mockito.mock(Domain.class); + + Mockito.when(userAccount.getEmail()).thenReturn(""); + apiServer.forgotPassword(userAccount, domain); + } + + @Test(expected = CloudRuntimeException.class) + public void testForgotPasswordFailureDisabledUser() { + UserAccount userAccount = Mockito.mock(UserAccount.class); + Domain domain = Mockito.mock(Domain.class); + + Mockito.when(userAccount.getEmail()).thenReturn("test@test.com"); + Mockito.when(userAccount.getState()).thenReturn("DISABLED"); + apiServer.forgotPassword(userAccount, domain); + } + + @Test(expected = CloudRuntimeException.class) + public void testForgotPasswordFailureDisabledAccount() { + UserAccount 
userAccount = Mockito.mock(UserAccount.class); + Domain domain = Mockito.mock(Domain.class); + + Mockito.when(userAccount.getEmail()).thenReturn("test@test.com"); + Mockito.when(userAccount.getState()).thenReturn("ENABLED"); + Mockito.when(userAccount.getAccountState()).thenReturn("DISABLED"); + apiServer.forgotPassword(userAccount, domain); + } + + @Test(expected = CloudRuntimeException.class) + public void testForgotPasswordFailureInactiveDomain() { + UserAccount userAccount = Mockito.mock(UserAccount.class); + Domain domain = Mockito.mock(Domain.class); + + Mockito.when(userAccount.getEmail()).thenReturn("test@test.com"); + Mockito.when(userAccount.getState()).thenReturn("ENABLED"); + Mockito.when(userAccount.getAccountState()).thenReturn("ENABLED"); + Mockito.when(domain.getState()).thenReturn(Domain.State.Inactive); + apiServer.forgotPassword(userAccount, domain); + } +} diff --git a/server/src/test/java/com/cloud/api/dispatch/ParamProcessWorkerTest.java b/server/src/test/java/com/cloud/api/dispatch/ParamProcessWorkerTest.java index 22c0ba5a795..da70bc1c1bf 100644 --- a/server/src/test/java/com/cloud/api/dispatch/ParamProcessWorkerTest.java +++ b/server/src/test/java/com/cloud/api/dispatch/ParamProcessWorkerTest.java @@ -26,6 +26,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.User; +import org.apache.cloudstack.api.ApiArgValidator; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; @@ -63,6 +64,9 @@ public class ParamProcessWorkerTest { @Parameter(name = "doubleparam1", type = CommandType.DOUBLE) double doubleparam1; + @Parameter(name = "vmHostNameParam", type = CommandType.STRING, validations = {ApiArgValidator.RFCComplianceDomainName}) + String vmHostNameParam; + @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, 
ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { @@ -100,11 +104,44 @@ public class ParamProcessWorkerTest { params.put("intparam1", "100"); params.put("boolparam1", "true"); params.put("doubleparam1", "11.89"); + params.put("vmHostNameParam", "test-host-name-123"); final TestCmd cmd = new TestCmd(); paramProcessWorker.processParameters(cmd, params); Assert.assertEquals("foo", cmd.strparam1); Assert.assertEquals(100, cmd.intparam1); Assert.assertTrue(Double.compare(cmd.doubleparam1, 11.89) == 0); + Assert.assertEquals("test-host-name-123", cmd.vmHostNameParam); } + @Test(expected = ServerApiException.class) + public void processVmHostNameParameter_CannotStartWithDigit() { + final HashMap params = new HashMap(); + params.put("vmHostNameParam", "123test"); + final TestCmd cmd = new TestCmd(); + paramProcessWorker.processParameters(cmd, params); + } + + @Test(expected = ServerApiException.class) + public void processVmHostNameParameter_CannotStartWithHypen() { + final HashMap params = new HashMap(); + params.put("vmHostNameParam", "-test"); + final TestCmd cmd = new TestCmd(); + paramProcessWorker.processParameters(cmd, params); + } + + @Test(expected = ServerApiException.class) + public void processVmHostNameParameter_CannotEndWithHypen() { + final HashMap params = new HashMap(); + params.put("vmHostNameParam", "test-"); + final TestCmd cmd = new TestCmd(); + paramProcessWorker.processParameters(cmd, params); + } + + @Test(expected = ServerApiException.class) + public void processVmHostNameParameter_NotMoreThan63Chars() { + final HashMap params = new HashMap(); + params.put("vmHostNameParam", "test-f2405112-d5a1-47c1-9f00-976909e3a6d3-1e6f3264-955ee76011a99"); + final TestCmd cmd = new TestCmd(); + paramProcessWorker.processParameters(cmd, params); + } } diff --git a/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java b/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java index 
91fd6911866..f5de105e22c 100644 --- a/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java +++ b/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java @@ -18,19 +18,26 @@ package com.cloud.api.query; import com.cloud.api.query.dao.TemplateJoinDao; +import com.cloud.api.query.dao.UserVmJoinDao; import com.cloud.api.query.vo.EventJoinVO; import com.cloud.api.query.vo.TemplateJoinVO; +import com.cloud.api.query.vo.UserVmJoinVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; import com.cloud.event.EventVO; import com.cloud.event.dao.EventDao; import com.cloud.event.dao.EventJoinDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.network.Network; import com.cloud.network.VNF; import com.cloud.network.dao.NetworkVO; import com.cloud.server.ResourceTag; import com.cloud.storage.BucketVO; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.ScopeType; import com.cloud.storage.dao.BucketDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.user.Account; @@ -43,10 +50,14 @@ import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; + import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.command.admin.storage.ListObjectStoragePoolsCmd; +import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd; import org.apache.cloudstack.api.command.user.bucket.ListBucketsCmd; import org.apache.cloudstack.api.command.user.event.ListEventsCmd; import org.apache.cloudstack.api.command.user.resource.ListDetailOptionsCmd; @@ -54,10 +65,13 @@ import 
org.apache.cloudstack.api.response.DetailOptionsResponse; import org.apache.cloudstack.api.response.EventResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ObjectStoreResponse; +import org.apache.cloudstack.api.response.VirtualMachineResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao; import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; import org.apache.commons.collections.CollectionUtils; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -68,6 +82,7 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; import java.util.ArrayList; import java.util.Arrays; @@ -115,11 +130,26 @@ public class QueryManagerImplTest { @Mock ObjectStoreDao objectStoreDao; + @Mock + VMInstanceDao vmInstanceDao; + + @Mock + PrimaryDataStoreDao storagePoolDao; + + @Mock + HostDao hostDao; + + @Mock + ClusterDao clusterDao; + @Mock BucketDao bucketDao; @Mock VMTemplateDao templateDao; + @Mock + UserVmJoinDao userVmJoinDao; + private AccountVO account; private UserVO user; @@ -406,4 +436,45 @@ public class QueryManagerImplTest { result = queryManager.getHostTagsFromTemplateForServiceOfferingsListing(account, templateId); Assert.assertTrue(CollectionUtils.isNotEmpty(result)); } + + public void testListAffectedVmsForScopeChange() { + Long clusterId = 1L; + Long poolId = 2L; + Long hostId = 3L; + Long vmId = 4L; + String vmName = "VM1"; + + ListAffectedVmsForStorageScopeChangeCmd cmd = new ListAffectedVmsForStorageScopeChangeCmd(); + ReflectionTestUtils.setField(cmd, "clusterIdForScopeChange", clusterId); + ReflectionTestUtils.setField(cmd, "storageId", 
poolId); + + StoragePoolVO pool = Mockito.mock(StoragePoolVO.class); + Mockito.when(pool.getScope()).thenReturn(ScopeType.CLUSTER); + Mockito.when(storagePoolDao.findById(poolId)).thenReturn(pool); + ListResponse response = queryManager.listAffectedVmsForStorageScopeChange(cmd); + Assert.assertEquals(response.getResponses().size(), 0); + + VMInstanceVO instance = Mockito.mock(VMInstanceVO.class); + UserVmJoinVO userVM = Mockito.mock(UserVmJoinVO.class); + String instanceUuid = String.valueOf(UUID.randomUUID()); + Pair, Integer> vms = new Pair<>(List.of(instance), 1); + HostVO host = Mockito.mock(HostVO.class); + ClusterVO cluster = Mockito.mock(ClusterVO.class); + + Mockito.when(pool.getScope()).thenReturn(ScopeType.ZONE); + Mockito.when(instance.getUuid()).thenReturn(instanceUuid); + Mockito.when(instance.getType()).thenReturn(VirtualMachine.Type.Instance); + Mockito.when(instance.getHostId()).thenReturn(hostId); + Mockito.when(instance.getId()).thenReturn(vmId); + Mockito.when(userVM.getDisplayName()).thenReturn(vmName); + Mockito.when(vmInstanceDao.listByVmsNotInClusterUsingPool(clusterId, poolId)).thenReturn(vms); + Mockito.when(userVmJoinDao.findById(vmId)).thenReturn(userVM); + Mockito.when(hostDao.findById(hostId)).thenReturn(host); + Mockito.when(host.getClusterId()).thenReturn(clusterId); + Mockito.when(clusterDao.findById(clusterId)).thenReturn(cluster); + + response = queryManager.listAffectedVmsForStorageScopeChange(cmd); + Assert.assertEquals(response.getResponses().get(0).getId(), instanceUuid); + Assert.assertEquals(response.getResponses().get(0).getName(), vmName); + } } diff --git a/server/src/test/java/com/cloud/bgp/BGPServiceImplTest.java b/server/src/test/java/com/cloud/bgp/BGPServiceImplTest.java new file mode 100644 index 00000000000..ace7bc85691 --- /dev/null +++ b/server/src/test/java/com/cloud/bgp/BGPServiceImplTest.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license 
agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.bgp; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; + +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class BGPServiceImplTest { + + @Spy + @InjectMocks + BGPServiceImpl bGPServiceImplSpy = new BGPServiceImpl(); + + @Test + public void testASNumbersOverlap() { + Assert.assertEquals(bGPServiceImplSpy.isASNumbersOverlap(1,2,3,4), false); + Assert.assertEquals(bGPServiceImplSpy.isASNumbersOverlap(1,2,2,4), true); + Assert.assertEquals(bGPServiceImplSpy.isASNumbersOverlap(1,3,2,4), true); + Assert.assertEquals(bGPServiceImplSpy.isASNumbersOverlap(2,4,1,3), true); + Assert.assertEquals(bGPServiceImplSpy.isASNumbersOverlap(1,4,2,3), true); + Assert.assertEquals(bGPServiceImplSpy.isASNumbersOverlap(3,4,1,2), false); + } +} diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerImplTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerImplTest.java index f7606a9a962..0a045296821 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerImplTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerImplTest.java @@ -24,6 
+24,8 @@ import com.cloud.dc.dao.DataCenterIpAddressDao; import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.VlanDao; +import com.cloud.domain.Domain; +import com.cloud.domain.dao.DomainDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.dao.HostDao; import com.cloud.network.Network; @@ -34,70 +36,69 @@ import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.element.NsxProviderVO; +import com.cloud.offering.DiskOffering; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.StorageManager; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.net.NetUtils; import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.command.admin.network.CreateNetworkOfferingCmd; +import org.apache.cloudstack.api.command.admin.offering.UpdateDiskOfferingCmd; import org.apache.cloudstack.api.command.admin.zone.DeleteZoneCmd; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; -import com.cloud.domain.Domain; -import com.cloud.domain.dao.DomainDao; -import com.cloud.offering.DiskOffering; -import com.cloud.storage.DiskOfferingVO; -import 
com.cloud.user.Account; -import com.cloud.user.User; -import com.cloud.utils.db.EntityManager; -import com.cloud.utils.db.SearchCriteria; -import org.apache.cloudstack.api.command.admin.offering.UpdateDiskOfferingCmd; import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO; -import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; +import org.apache.cloudstack.vm.UnmanagedVMsManager; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.MockedStatic; import org.mockito.Mockito; +import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; -import java.util.Collections; -import org.mockito.InjectMocks; -import org.mockito.Spy; - import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.lenient; -import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.anyBoolean; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.nullable; import static org.mockito.Mockito.anyMap; -import static org.mockito.Mockito.anyList; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.nullable; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class ConfigurationManagerImplTest { + + @InjectMocks + @Spy + ConfigurationManagerImpl configurationManagerImplSpy; @Mock 
ConfigDepot configDepot; - @InjectMocks - ConfigurationManagerImpl configurationManagerImplSpy = Mockito.spy(new ConfigurationManagerImpl()); @Mock SearchCriteria searchCriteriaDiskOfferingDetailMock; @Mock @@ -113,12 +114,13 @@ public class ConfigurationManagerImplTest { @Mock EntityManager entityManagerMock; @Mock - DiskOfferingDetailsDao diskOfferingDetailsDao; + ConfigurationVO configurationVOMock; + @Mock + ConfigKey configKeyMock; @Spy DiskOfferingVO diskOfferingVOSpy; @Mock UpdateDiskOfferingCmd updateDiskOfferingCmdMock; - @Mock NsxProviderDao nsxProviderDao; @Mock @@ -173,24 +175,13 @@ public class ConfigurationManagerImplTest { @Before public void setUp() throws Exception { - configurationManagerImplSpy._configDepot = configDepot; - configurationManagerImplSpy.nsxProviderDao = nsxProviderDao; - configurationManagerImplSpy._zoneDao = zoneDao; - configurationManagerImplSpy._hostDao = hostDao; - configurationManagerImplSpy._podDao = podDao; - configurationManagerImplSpy._privateIpAddressDao = ipAddressDao; - configurationManagerImplSpy._publicIpAddressDao = publicIpAddressDao; - configurationManagerImplSpy._vmInstanceDao = vmInstanceDao; - configurationManagerImplSpy._volumeDao = volumeDao; - configurationManagerImplSpy._physicalNetworkDao = physicalNetworkDao; - configurationManagerImplSpy._imageStoreDao = imageStoreDao; - configurationManagerImplSpy._vlanDao = vlanDao; - configurationManagerImplSpy._capacityDao = capacityDao; - configurationManagerImplSpy._dedicatedDao = dedicatedResourceDao; - configurationManagerImplSpy._configDao = configDao; - configurationManagerImplSpy._networkOfferingDao = networkOfferingDao; - configurationManagerImplSpy._networkSvc = networkService; - configurationManagerImplSpy._networkModel = networkModel; + Mockito.when(configurationVOMock.getScope()).thenReturn(ConfigKey.Scope.Global.name()); + Mockito.when(configDao.findByName(Mockito.anyString())).thenReturn(configurationVOMock); + 
Mockito.when(configDepot.get(Mockito.anyString())).thenReturn(configKeyMock); + + configurationManagerImplSpy.populateConfigValuesForValidationSet(); + configurationManagerImplSpy.weightBasedParametersForValidation(); + configurationManagerImplSpy.overProvisioningFactorsForValidation(); ReflectionTestUtils.setField(configurationManagerImplSpy, "templateZoneDao", vmTemplateZoneDao); ReflectionTestUtils.setField(configurationManagerImplSpy, "annotationDao", annotationDao); @@ -204,7 +195,6 @@ public class ConfigurationManagerImplTest { Assert.assertNull(testVariable); } - @Test public void validateIfIntValueIsInRangeTestInvalidValueReturnString() { String testVariable = configurationManagerImplSpy.validateIfIntValueIsInRange("String name", "9", "1-5"); @@ -438,17 +428,12 @@ public class ConfigurationManagerImplTest { NetworkOfferingVO offeringVO = Mockito.mock(NetworkOfferingVO.class); when(createNetworkOfferingCmd.isForNsx()).thenReturn(true); - when(createNetworkOfferingCmd.getNsxMode()).thenReturn(NetworkOffering.NsxMode.NATTED.name()); + when(createNetworkOfferingCmd.getNetworkMode()).thenReturn(NetworkOffering.NetworkMode.NATTED.name()); when(createNetworkOfferingCmd.getTraffictype()).thenReturn(Networks.TrafficType.Guest.name()); when(createNetworkOfferingCmd.getGuestIpType()).thenReturn(Network.GuestType.Isolated.name()); when(createNetworkOfferingCmd.getAvailability()).thenReturn(NetworkOffering.Availability.Optional.name()); - lenient().when(configurationManagerImplSpy.createNetworkOffering(anyString(), anyString(), any(Networks.TrafficType.class), anyString(), - anyBoolean(), any(NetworkOffering.Availability.class), anyInt(), anyMap(), anyBoolean(), any(Network.GuestType.class), - anyBoolean(), anyLong(), anyBoolean(), anyMap(), anyBoolean(), anyBoolean(), anyMap(), anyBoolean(), anyInt(), - anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean(), anyString(), anyList(), anyList(), anyBoolean(), any(NetUtils.InternetProtocol.class))) - 
.thenReturn(offeringVO); when(configDao.getValue(anyString())).thenReturn("1000"); - lenient().when(networkOfferingDao.persist(any(NetworkOfferingVO.class), anyMap())).thenReturn(offeringVO); + when(networkOfferingDao.persist(any(NetworkOfferingVO.class), anyMap())).thenReturn(offeringVO); doNothing().when(networkService).validateIfServiceOfferingIsActiveAndSystemVmTypeIsDomainRouter(anyLong()); doNothing().when(networkModel).canProviderSupportServices(anyMap()); @@ -457,6 +442,71 @@ public class ConfigurationManagerImplTest { Assert.assertNotNull(offering); } + public void testValidateInvalidConfiguration() { + Mockito.doReturn(null).when(configDao).findByName(Mockito.anyString()); + String msg = configurationManagerImplSpy.validateConfigurationValue("test.config.name", "testvalue", ConfigKey.Scope.Global.toString()); + Assert.assertEquals("Invalid configuration variable.", msg); + } + + @Test + public void testValidateInvalidScopeForConfiguration() { + ConfigurationVO cfg = mock(ConfigurationVO.class); + when(cfg.getScope()).thenReturn(ConfigKey.Scope.Account.toString()); + Mockito.doReturn(cfg).when(configDao).findByName(Mockito.anyString()); + String msg = configurationManagerImplSpy.validateConfigurationValue("test.config.name", "testvalue", ConfigKey.Scope.Domain.toString()); + Assert.assertEquals("Invalid scope id provided for the parameter test.config.name", msg); + } + + @Test + public void testValidateConfig_ThreadsOnKVMHostToTransferVMwareVMFiles_Failure() { + ConfigurationVO cfg = mock(ConfigurationVO.class); + when(cfg.getScope()).thenReturn(ConfigKey.Scope.Global.toString()); + ConfigKey configKey = UnmanagedVMsManager.ThreadsOnKVMHostToImportVMwareVMFiles; + Mockito.doReturn(cfg).when(configDao).findByName(Mockito.anyString()); + Mockito.doReturn(configKey).when(configurationManagerImplSpy._configDepot).get(configKey.key()); + + String result = configurationManagerImplSpy.validateConfigurationValue(configKey.key(), "11", 
configKey.scope().toString()); + + Assert.assertNotNull(result); + } + + @Test + public void testValidateConfig_ThreadsOnKVMHostToTransferVMwareVMFiles_Success() { + ConfigurationVO cfg = mock(ConfigurationVO.class); + when(cfg.getScope()).thenReturn(ConfigKey.Scope.Global.toString()); + ConfigKey configKey = UnmanagedVMsManager.ThreadsOnKVMHostToImportVMwareVMFiles; + Mockito.doReturn(cfg).when(configDao).findByName(Mockito.anyString()); + Mockito.doReturn(configKey).when(configurationManagerImplSpy._configDepot).get(configKey.key()); + String msg = configurationManagerImplSpy.validateConfigurationValue(configKey.key(), "10", configKey.scope().toString()); + Assert.assertNull(msg); + } + + @Test + public void testValidateConfig_ConvertVmwareInstanceToKvmTimeout_Failure() { + ConfigurationVO cfg = mock(ConfigurationVO.class); + when(cfg.getScope()).thenReturn(ConfigKey.Scope.Global.toString()); + ConfigKey configKey = UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout; + Mockito.doReturn(cfg).when(configDao).findByName(Mockito.anyString()); + Mockito.doReturn(configKey).when(configurationManagerImplSpy._configDepot).get(configKey.key()); + configurationManagerImplSpy.populateConfigValuesForValidationSet(); + + String result = configurationManagerImplSpy.validateConfigurationValue(configKey.key(), "0", configKey.scope().toString()); + + Assert.assertNotNull(result); + } + + @Test + public void testValidateConfig_ConvertVmwareInstanceToKvmTimeout_Success() { + ConfigurationVO cfg = mock(ConfigurationVO.class); + when(cfg.getScope()).thenReturn(ConfigKey.Scope.Global.toString()); + ConfigKey configKey = UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout; + Mockito.doReturn(cfg).when(configDao).findByName(Mockito.anyString()); + Mockito.doReturn(configKey).when(configurationManagerImplSpy._configDepot).get(configKey.key()); + configurationManagerImplSpy.populateConfigValuesForValidationSet(); + String msg = 
configurationManagerImplSpy.validateConfigurationValue(configKey.key(), "9", configKey.scope().toString()); + Assert.assertNull(msg); + } + @Test public void validateDomainTestInvalidIdThrowException() { Mockito.doReturn(null).when(domainDaoMock).findById(invalidId); @@ -564,4 +614,241 @@ public class ConfigurationManagerImplTest { Assert.assertThrows(InvalidParameterValueException.class, () -> configurationManagerImplSpy.checkIfDomainIsChildDomain(diskOfferingMock, accountMock, userMock, filteredDomainIds)); } + + @Test + public void validateConfigurationValueTestValidatesValueType() { + Mockito.when(configKeyMock.type()).thenReturn(Integer.class); + configurationManagerImplSpy.validateConfigurationValue("validate.type", "100", ConfigKey.Scope.Global.name()); + Mockito.verify(configurationManagerImplSpy).validateValueType("100", Integer.class); + } + + @Test + public void validateConfigurationValueTestValidatesValueRange() { + Mockito.when(configKeyMock.type()).thenReturn(Integer.class); + configurationManagerImplSpy.validateConfigurationValue("validate.range", "100", ConfigKey.Scope.Global.name()); + Mockito.verify(configurationManagerImplSpy).validateValueRange("validate.range", "100", Integer.class, null); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsNullAndTypeIsString() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType(null, String.class)); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsNumericAndTypeIsString() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType("1", String.class)); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsStringAndTypeIsString() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType("test", String.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsNullAndTypeIsBoolean() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType(null, Boolean.class)); + } + + @Test + public void 
validateValueTypeTestReturnsFalseWhenValueIsNumericAndTypeIsBoolean() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("1", Boolean.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsStringAndTypeIsBoolean() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("test", Boolean.class)); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsTrueAndTypeIsBoolean() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType("true", Boolean.class)); + + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsFalseAndTypeIsBoolean() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType("false", Boolean.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsNullAndTypeIsInteger() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType(null, Integer.class)); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsIntegerAndTypeIsInteger() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType("-2147483647", Integer.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueExceedsIntegerLimitAndTypeIsInteger() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("2147483648", Integer.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsDecimalAndTypeIsInteger() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("1.1", Integer.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsStringAndTypeIsInteger() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("test", Integer.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsNullAndTypeIsShort() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType(null, Short.class)); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsIntegerAndTypeIsShort() { + 
Assert.assertTrue(configurationManagerImplSpy.validateValueType("-32768", Short.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueExceedsShortLimitAndTypeIsShort() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("32768", Short.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsDecimalAndTypeIsShort() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("1.1", Short.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsStringAndTypeIsShort() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("test", Short.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsNullAndTypeIsLong() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType(null, Long.class)); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsIntegerAndTypeIsLong() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType("-9223372036854775807", Long.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueExceedsLongLimitAndTypeIsLong() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("9223372036854775808", Long.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsDecimalAndTypeIsLong() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("1.1", Long.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsStringAndTypeIsLong() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("test", Long.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsNullAndTypeIsFloat() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType(null, Float.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsInfiniteAndTypeIsFloat() { + 
Assert.assertFalse(configurationManagerImplSpy.validateValueType("9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", Float.class)); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsNumericAndTypeIsFloat() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType("1.1", Float.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsStringAndTypeIsFloat() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("test", Float.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsNullAndTypeIsDouble() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType(null, Double.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsInfiniteAndTypeIsDouble() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", Double.class)); + } + + @Test + public void validateValueTypeTestReturnsTrueWhenValueIsNumericAndTypeIsDouble() { + Assert.assertTrue(configurationManagerImplSpy.validateValueType("1.1", 
Double.class)); + } + + @Test + public void validateValueTypeTestReturnsFalseWhenValueIsStringAndTypeIsDouble() { + Assert.assertFalse(configurationManagerImplSpy.validateValueType("test", Double.class)); + } + + @Test + public void validateValueRangeTestReturnsNullWhenConfigKeyHasNoRange() { + Assert.assertNull(configurationManagerImplSpy.validateValueRange("configkey.without.range", "0", Integer.class, null)); + } + + @Test + public void validateValueRangeTestReturnsNullWhenConfigKeyHasRangeAndValueIsValid() { + Assert.assertNull(configurationManagerImplSpy.validateValueRange(NetworkModel.MACIdentifier.key(), "100", Integer.class, null)); + } + + @Test + public void validateValueRangeTestReturnsNotNullWhenConfigKeyHasRangeAndValueIsInvalid() { + Assert.assertNotNull(configurationManagerImplSpy.validateValueRange(NetworkModel.MACIdentifier.key(), "-1", Integer.class, null)); + } + + @Test + public void validateValueRangeTestValidatesValueWhenConfigHasRange() { + Config config = Config.SecStorageEncryptCopy; + String name = config.name(); + String value = "value"; + String expectedResult = "expectedResult"; + + Mockito.doReturn(expectedResult).when(configurationManagerImplSpy).validateIfStringValueIsInRange(name, value, config.getRange().split(",")); + + String result = configurationManagerImplSpy.validateValueRange(name, value, config.getType(), config); + + Assert.assertEquals(expectedResult, result); + } + + @Test + public void validateValueRangeTestValidatesIntValueWhenConfigHasNumericRange() { + Config config = Config.RouterExtraPublicNics; + String name = config.name(); + String value = "1"; + String expectedResult = "expectedResult"; + + Mockito.doReturn(expectedResult).when(configurationManagerImplSpy).validateIfIntValueIsInRange(name, value, config.getRange()); + + String result = configurationManagerImplSpy.validateValueRange(name, value, config.getType(), config); + + Assert.assertEquals(expectedResult, result); + } + + @Test + public void 
shouldValidateConfigRangeTestValueIsNullReturnFalse() { + boolean result = configurationManagerImplSpy.shouldValidateConfigRange(Config.ConsoleProxyUrlDomain.name(), null, Config.ConsoleProxyUrlDomain); + Assert.assertFalse(result); + } + + @Test + public void shouldValidateConfigRangeTestConfigIsNullReturnFalse() { + boolean result = configurationManagerImplSpy.shouldValidateConfigRange("", "test", null); + Assert.assertFalse(result); + } + + @Test + public void shouldValidateConfigRangeTestConfigDoesNotHaveARangeReturnFalse() { + boolean result = configurationManagerImplSpy.shouldValidateConfigRange(Config.ConsoleProxySessionMax.name(), "test", Config.ConsoleProxySessionMax); + Assert.assertFalse(result); + } + + @Test + public void shouldValidateConfigRangeTestValueIsNotNullAndConfigHasRangeReturnTrue() { + boolean result = configurationManagerImplSpy.shouldValidateConfigRange(Config.ConsoleProxySessionMax.name(), "test", Config.ConsoleProxyUrlDomain); + Assert.assertTrue(result); + } } diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index 312719eb850..4ae871e1ba5 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -116,6 +116,7 @@ import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Random; @@ -629,11 +630,11 @@ public class ConfigurationManagerTest { @Test public void isRedundantRouter() { - Map> serviceCapabilityMap = new HashMap<>(); + Set providers = new HashSet<>(); Map sourceNatServiceCapabilityMap = new HashMap<>(); sourceNatServiceCapabilityMap.put(Capability.SupportedSourceNatTypes, "peraccount"); sourceNatServiceCapabilityMap.put(Capability.RedundantRouter, "true"); - 
Assert.assertTrue(configurationMgr.isRedundantRouter(serviceCapabilityMap, sourceNatServiceCapabilityMap)); + Assert.assertTrue(configurationMgr.isRedundantRouter(providers, Network.Service.SourceNat, sourceNatServiceCapabilityMap)); } @Test diff --git a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java index 3afd3dc4a95..482d17908f4 100644 --- a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java +++ b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java @@ -21,6 +21,7 @@ import com.cloud.agent.AgentManager; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.dao.CapacityDao; import com.cloud.configuration.ConfigurationManagerImpl; +import com.cloud.cpu.CPU; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; @@ -295,6 +296,10 @@ public class DeploymentPlanningManagerImplTest { DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); Mockito.when(avoids.shouldAvoid((DataCenterVO) ArgumentMatchers.any())).thenReturn(true); + VirtualMachineTemplate template = Mockito.mock(VirtualMachineTemplate.class); + Mockito.when(template.getArch()).thenReturn(CPU.CPUArch.amd64); + Mockito.when(vmProfile.getTemplate()).thenReturn(template); + Mockito.when(_clusterDao.listClustersByArchAndZoneId(dataCenterId, CPU.CPUArch.arm64)).thenReturn(null); DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); assertNull("DataCenter is in avoid set, destination should be null! 
", dest); } @@ -310,6 +315,10 @@ public class DeploymentPlanningManagerImplTest { Mockito.when(avoids.shouldAvoid((DataCenterVO) ArgumentMatchers.any())).thenReturn(false); Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(false); + VirtualMachineTemplate template = Mockito.mock(VirtualMachineTemplate.class); + Mockito.when(template.getArch()).thenReturn(CPU.CPUArch.amd64); + Mockito.when(vmProfile.getTemplate()).thenReturn(template); + Mockito.when(_clusterDao.listClustersByArchAndZoneId(dataCenterId, CPU.CPUArch.arm64)).thenReturn(null); DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); assertNull("Planner cannot handle, destination should be null! ", dest); } @@ -326,6 +335,10 @@ public class DeploymentPlanningManagerImplTest { Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(true); Mockito.when(((DeploymentClusterPlanner) _planner).orderClusters(vmProfile, plan, avoids)).thenReturn(null); + VirtualMachineTemplate template = Mockito.mock(VirtualMachineTemplate.class); + Mockito.when(template.getArch()).thenReturn(CPU.CPUArch.amd64); + Mockito.when(vmProfile.getTemplate()).thenReturn(template); + Mockito.when(_clusterDao.listClustersByArchAndZoneId(dataCenterId, CPU.CPUArch.arm64)).thenReturn(null); DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids, null); assertNull("Planner cannot handle, destination should be null! 
", dest); } diff --git a/server/src/test/java/com/cloud/event/ActionEventInterceptorTest.java b/server/src/test/java/com/cloud/event/ActionEventInterceptorTest.java index 109cb585d8e..f655c0d74c0 100644 --- a/server/src/test/java/com/cloud/event/ActionEventInterceptorTest.java +++ b/server/src/test/java/com/cloud/event/ActionEventInterceptorTest.java @@ -140,6 +140,7 @@ public class ActionEventInterceptorTest { } utils.init(); + CallContext.register(user, account); } /** @@ -152,6 +153,7 @@ public class ActionEventInterceptorTest { Mockito.when(configDao.getValue(Config.PublishActionEvent.key())).thenReturn("true"); componentContextMocked = Mockito.mockStatic(ComponentContext.class); Mockito.when(ComponentContext.getComponent(EventBus.class)).thenReturn(eventBus); + persistedEvents = new ArrayList<>(); //Needed for persist to actually set an ID that can be returned from the ActionEventUtils //methods. @@ -231,11 +233,11 @@ public class ActionEventInterceptorTest { Object event = actionEventInterceptor.interceptStart(m, tester); Assert.assertNull(event); - Assert.assertEquals(persistedEvents.size(), 1); + Assert.assertEquals(1, persistedEvents.size()); EventVO eventVO = persistedEvents.get(0); - Assert.assertEquals(eventVO.getType(), EventTypes.EVENT_VM_START); - Assert.assertEquals(eventVO.getDescription(), "Starting VM"); - Assert.assertEquals(eventVO.getState(), com.cloud.event.Event.State.Started); + Assert.assertEquals(EventTypes.EVENT_VM_START, eventVO.getType()); + Assert.assertEquals(eventDescription, eventVO.getDescription()); + Assert.assertEquals(com.cloud.event.Event.State.Started, eventVO.getState()); } @Test @@ -244,12 +246,12 @@ public class ActionEventInterceptorTest { Method m = tester.getClass().getMethod("testMethod"); actionEventInterceptor.interceptComplete(m, tester, null); - Assert.assertEquals(persistedEvents.size(), 1); + Assert.assertEquals(1, persistedEvents.size()); EventVO eventVO = persistedEvents.get(0); - 
Assert.assertEquals(eventVO.getType(), eventType); + Assert.assertEquals(eventType, eventVO.getType()); Assert.assertTrue(eventVO.getDescription().endsWith(eventDescription)); - Assert.assertEquals(eventVO.getLevel(), EventVO.LEVEL_INFO); - Assert.assertEquals(eventVO.getState(), com.cloud.event.Event.State.Completed); + Assert.assertEquals(EventVO.LEVEL_INFO, eventVO.getLevel()); + Assert.assertEquals(com.cloud.event.Event.State.Completed, eventVO.getState()); } @Test @@ -258,12 +260,12 @@ public class ActionEventInterceptorTest { Method m = tester.getClass().getMethod("testMethod"); actionEventInterceptor.interceptException(m, tester, null); - Assert.assertEquals(persistedEvents.size(), 1); + Assert.assertEquals(1, persistedEvents.size()); EventVO eventVO = persistedEvents.get(0); - Assert.assertEquals(eventVO.getType(), eventType); + Assert.assertEquals(eventType, eventVO.getType()); Assert.assertTrue(eventVO.getDescription().endsWith(eventDescription)); - Assert.assertEquals(eventVO.getLevel(), EventVO.LEVEL_ERROR); - Assert.assertEquals(eventVO.getState(), com.cloud.event.Event.State.Completed); + Assert.assertEquals(EventVO.LEVEL_ERROR, eventVO.getLevel()); + Assert.assertEquals(com.cloud.event.Event.State.Completed, eventVO.getState()); } @Test @@ -276,14 +278,14 @@ public class ActionEventInterceptorTest { Method m = tester.getClass().getMethod("testMethod"); actionEventInterceptor.interceptException(m, tester, null); - Assert.assertEquals(persistedEvents.size(), 1); + Assert.assertEquals(1, persistedEvents.size()); EventVO eventVO = persistedEvents.get(0); - Assert.assertEquals(eventVO.getType(), eventType); + Assert.assertEquals(eventType, eventVO.getType()); Assert.assertTrue(eventVO.getDescription().endsWith(eventDescription)); - Assert.assertEquals(eventVO.getLevel(), EventVO.LEVEL_ERROR); - Assert.assertEquals(eventVO.getState(), com.cloud.event.Event.State.Completed); - Assert.assertEquals(eventVO.getResourceId(), resourceId); - 
Assert.assertEquals(eventVO.getResourceType(), resourceType.toString()); + Assert.assertEquals(EventVO.LEVEL_ERROR, eventVO.getLevel()); + Assert.assertEquals(com.cloud.event.Event.State.Completed, eventVO.getState()); + Assert.assertEquals(resourceId, eventVO.getResourceId()); + Assert.assertEquals(resourceType.toString(), eventVO.getResourceType()); } @Test diff --git a/server/src/test/java/com/cloud/event/ActionEventUtilsTest.java b/server/src/test/java/com/cloud/event/ActionEventUtilsTest.java index aed28702df5..aba8acf59c2 100644 --- a/server/src/test/java/com/cloud/event/ActionEventUtilsTest.java +++ b/server/src/test/java/com/cloud/event/ActionEventUtilsTest.java @@ -29,7 +29,8 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.events.Event; -import org.apache.cloudstack.framework.events.EventBus; +import org.apache.cloudstack.framework.events.EventBusException; +import org.apache.cloudstack.framework.events.EventDistributor; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -97,7 +98,7 @@ public class ActionEventUtilsTest { protected ConfigurationDao configDao; @Mock - protected EventBus eventBus; + protected EventDistributor eventDistributor; private AccountVO account; private UserVO user; @@ -149,7 +150,7 @@ public class ActionEventUtilsTest { //Some basic mocks. Mockito.when(configDao.getValue(Config.PublishActionEvent.key())).thenReturn("true"); componentContextMocked = Mockito.mockStatic(ComponentContext.class); - componentContextMocked.when(() -> ComponentContext.getComponent(EventBus.class)).thenReturn(eventBus); + componentContextMocked.when(() -> ComponentContext.getComponent(EventDistributor.class)).thenReturn(eventDistributor); //Needed for persist to actually set an ID that can be returned from the ActionEventUtils //methods. 
@@ -166,14 +167,11 @@ public class ActionEventUtilsTest { }); //Needed to record events published on the bus. - Mockito.doAnswer(new Answer() { - @Override public Void answer(InvocationOnMock invocation) throws Throwable { - Event event = (Event)invocation.getArguments()[0]; - publishedEvents.add(event); - return null; - } - - }).when(eventBus).publish(Mockito.any(Event.class)); + Mockito.doAnswer((Answer>) invocation -> { + Event event = (Event)invocation.getArguments()[0]; + publishedEvents.add(event); + return new HashMap<>(); + }).when(eventDistributor).publish(Mockito.any(Event.class)); account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); account.setId(ACCOUNT_ID); diff --git a/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java b/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java index fcd3c373add..74897967a2f 100644 --- a/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java +++ b/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java @@ -60,6 +60,7 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.VpcVirtualNetworkApplianceService; import com.cloud.resource.ResourceManager; import com.cloud.server.ManagementServer; import com.cloud.service.ServiceOfferingVO; @@ -70,6 +71,7 @@ import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.user.AccountManager; +import com.cloud.vm.UserVmManager; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; @@ -126,6 +128,10 @@ public class HighAvailabilityManagerImplTest { VolumeDao volumeDao; @Mock DataStoreProviderManager dataStoreProviderMgr; + @Mock + VpcVirtualNetworkApplianceService routerService; + @Mock + UserVmManager 
userVmManager; HighAvailabilityManagerImpl highAvailabilityManager; HighAvailabilityManagerImpl highAvailabilityManagerSpy; diff --git a/server/src/test/java/com/cloud/ha/dao/HighAvailabilityDaoImplTest.java b/server/src/test/java/com/cloud/ha/dao/HighAvailabilityDaoImplTest.java new file mode 100644 index 00000000000..783497740fd --- /dev/null +++ b/server/src/test/java/com/cloud/ha/dao/HighAvailabilityDaoImplTest.java @@ -0,0 +1,68 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.ha.dao; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.cloud.ha.HaWorkVO; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class HighAvailabilityDaoImplTest { + + @Spy + HighAvailabilityDaoImpl highAvailabilityDaoImpl; + + @Test + public void testExpungeByVmListNoVms() { + Assert.assertEquals(0, highAvailabilityDaoImpl.expungeByVmList( + new ArrayList<>(), 100L)); + Assert.assertEquals(0, highAvailabilityDaoImpl.expungeByVmList( + null, 100L)); + } + + @Test + public void testExpungeByVmList() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doAnswer((Answer) invocationOnMock -> { + Long batchSize = (Long)invocationOnMock.getArguments()[1]; + return batchSize == null ? 
0 : batchSize.intValue(); + }).when(highAvailabilityDaoImpl).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); + Mockito.when(highAvailabilityDaoImpl.createSearchBuilder()).thenReturn(sb); + final HaWorkVO mockedVO = Mockito.mock(HaWorkVO.class); + Mockito.when(sb.entity()).thenReturn(mockedVO); + List vmIds = List.of(1L, 2L); + Object[] array = vmIds.toArray(); + Long batchSize = 50L; + Assert.assertEquals(batchSize.intValue(), highAvailabilityDaoImpl.expungeByVmList(List.of(1L, 2L), batchSize)); + Mockito.verify(sc).setParameters("vmIds", array); + Mockito.verify(highAvailabilityDaoImpl, Mockito.times(1)) + .batchExpunge(sc, batchSize); + } +} diff --git a/server/src/test/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscovererTest.java b/server/src/test/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscovererTest.java new file mode 100644 index 00000000000..aaf5a04b74a --- /dev/null +++ b/server/src/test/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscovererTest.java @@ -0,0 +1,54 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.discoverer; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class LibvirtServerDiscovererTest { + + @Spy + private LibvirtServerDiscoverer libvirtServerDiscoverer; + + @Test + public void validateCompatibleOses() { + validateCompatibleOs("Rocky Linux", "Rocky Linux", true); + validateCompatibleOs("Rocky", "Rocky Linux", true); + validateCompatibleOs("Red", "Red Hat Enterprise Linux", true); + validateCompatibleOs("Oracle", "Oracle Linux Server", true); + validateCompatibleOs("Rocky Linux", "Red Hat Enterprise Linux", true); + validateCompatibleOs("AlmaLinux", "Red Hat Enterprise Linux", true); + + validateCompatibleOs("Windows", "Rocky Linux", false); + validateCompatibleOs("SUSE", "Rocky Linux", false); + } + + private void validateCompatibleOs(String hostOsInCluster, String hostOs, boolean expected) { + if (expected) { + Assert.assertTrue(libvirtServerDiscoverer.isHostOsCompatibleWithOtherHost(hostOsInCluster, hostOs)); + } else { + Assert.assertFalse(libvirtServerDiscoverer.isHostOsCompatibleWithOtherHost(hostOsInCluster, hostOs)); + } + } +} diff --git a/server/src/test/java/com/cloud/network/CreatePrivateNetworkTest.java b/server/src/test/java/com/cloud/network/CreatePrivateNetworkTest.java index 1160bf2ac8e..924d7df8896 100644 --- a/server/src/test/java/com/cloud/network/CreatePrivateNetworkTest.java +++ b/server/src/test/java/com/cloud/network/CreatePrivateNetworkTest.java @@ -132,7 +132,7 @@ public class CreatePrivateNetworkTest { ACLType.Account, false, 1L, false); when(networkService._networkMgr.createGuestNetwork(eq(ntwkOff.getId()), eq("bla"), eq("fake"), eq("10.1.1.1"), eq("10.1.1.0/24"), nullable(String.class), nullable(Boolean.class), nullable(String.class), eq(account), nullable(Long.class), eq(physicalNetwork), eq(physicalNetwork.getDataCenterId()), 
eq(ACLType.Account), nullable(Boolean.class), eq(1L), nullable(String.class), nullable(String.class), - nullable(Boolean.class), nullable(String.class), nullable(Network.PVlanType.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(Pair.class))).thenReturn(net); + nullable(Boolean.class), nullable(String.class), nullable(Network.PVlanType.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(Pair.class), nullable(Integer.class))).thenReturn(net); when( networkService._networkMgr.createPrivateNetwork(eq(ntwkOff.getId()), eq("bla"), eq("fake"), eq("10.1.1.1"), eq("10.1.1.0/24"), anyString(), anyBoolean(), eq(account), eq(physicalNetwork), eq(1L))).thenReturn(net); diff --git a/server/src/test/java/com/cloud/network/MockNetworkModelImpl.java b/server/src/test/java/com/cloud/network/MockNetworkModelImpl.java index 3df12a4ed84..ea9a0340eaa 100644 --- a/server/src/test/java/com/cloud/network/MockNetworkModelImpl.java +++ b/server/src/test/java/com/cloud/network/MockNetworkModelImpl.java @@ -237,7 +237,7 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { * @see com.cloud.network.NetworkModel#getNetworkWithSGWithFreeIPs(java.lang.Long) */ @Override - public NetworkVO getNetworkWithSGWithFreeIPs(Long zoneId) { + public NetworkVO getNetworkWithSGWithFreeIPs(Account account, Long zoneId) { // TODO Auto-generated method stub return null; } @@ -341,6 +341,11 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { return false; } + @Override + public boolean isAnyServiceSupportedInNetwork(long networkId, Provider provider, Service... 
services) { + return false; + } + /* (non-Javadoc) * @see com.cloud.network.NetworkModel#isProviderEnabledInPhysicalNetwork(long, java.lang.String) */ @@ -839,7 +844,11 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { @Override public void checkIp6Parameters(String startIPv6, String endIPv6, String ip6Gateway, String ip6Cidr) throws InvalidParameterValueException { // TODO Auto-generated method stub + } + @Override + public void checkIp6CidrSizeEqualTo64(String ip6Cidr) throws InvalidParameterValueException { + // TODO Auto-generated method stub } @Override @@ -934,4 +943,14 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { @Override public void verifyIp6DnsPair(String ip4Dns1, String ip4Dns2) {} + + @Override + public boolean isSecurityGroupSupportedForZone(Long zoneId) { + return false; + } + + @Override + public boolean checkSecurityGroupSupportForNetwork(Account account, DataCenter zone, List networkIds, List securityGroupsIds) { + return false; + } } diff --git a/server/src/test/java/com/cloud/network/NetworkModelTest.java b/server/src/test/java/com/cloud/network/NetworkModelTest.java index a1494a11d57..011764fea3c 100644 --- a/server/src/test/java/com/cloud/network/NetworkModelTest.java +++ b/server/src/test/java/com/cloud/network/NetworkModelTest.java @@ -245,29 +245,25 @@ public class NetworkModelTest { @Test(expected = InvalidParameterValueException.class) public void checkIp6ParametersTestCidr32() { String ipv6cidr = "fd59:16ba:559b:243d::/32"; - String endipv6 = "fd59:16ba:ffff:ffff:ffff:ffff:ffff:ffff"; - networkModel.checkIp6Parameters(START_IPV6, endipv6, IPV6_GATEWAY,ipv6cidr); + networkModel.checkIp6CidrSizeEqualTo64(ipv6cidr); } @Test(expected = InvalidParameterValueException.class) public void checkIp6ParametersTestCidr63() { String ipv6cidr = "fd59:16ba:559b:243d::/63"; - String endipv6 = "fd59:16ba:559b:243d:ffff:ffff:ffff:ffff"; - networkModel.checkIp6Parameters(START_IPV6, 
endipv6, IPV6_GATEWAY,ipv6cidr); + networkModel.checkIp6CidrSizeEqualTo64(ipv6cidr); } @Test(expected = InvalidParameterValueException.class) public void checkIp6ParametersTestCidr65() { String ipv6cidr = "fd59:16ba:559b:243d::/65"; - String endipv6 = "fd59:16ba:559b:243d:7fff:ffff:ffff:ffff"; - networkModel.checkIp6Parameters(START_IPV6, endipv6, IPV6_GATEWAY,ipv6cidr); + networkModel.checkIp6CidrSizeEqualTo64(ipv6cidr); } @Test(expected = InvalidParameterValueException.class) public void checkIp6ParametersTestCidr120() { String ipv6cidr = "fd59:16ba:559b:243d::/120"; - String endipv6 = "fd59:16ba:559b:243d:0:0:0:ff"; - networkModel.checkIp6Parameters(START_IPV6, endipv6, IPV6_GATEWAY,ipv6cidr); + networkModel.checkIp6CidrSizeEqualTo64(ipv6cidr); } @Test(expected = InvalidParameterValueException.class) diff --git a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java index 7832537fe71..64d813c9ba8 100644 --- a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java @@ -19,10 +19,9 @@ package com.cloud.network; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; @@ -53,6 +52,7 @@ import org.apache.cloudstack.api.command.user.network.UpdateNetworkCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; 
+import org.apache.cloudstack.network.RoutedIpv4Manager; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -67,6 +67,7 @@ import org.springframework.test.util.ReflectionTestUtils; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.alert.AlertManager; +import com.cloud.bgp.BGPService; import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; @@ -211,6 +212,12 @@ public class NetworkServiceImplTest { @Mock private IpAddressManager ipAddressManagerMock; + @Mock + private RoutedIpv4Manager routedIpv4Manager; + + @Mock + BGPService bgpService; + @Mock private Ip ipMock; @Mock @@ -248,6 +255,10 @@ public class NetworkServiceImplTest { private AutoCloseable closeable; + private NetworkOfferingVO networkOfferingVO; + private Long zoneId = 10L; + private Long networkId = 11L; + @BeforeClass public static void setUpBeforeClass() { Date date = new Date(); @@ -310,7 +321,6 @@ public class NetworkServiceImplTest { Mockito.when(networkOfferingDao.findById(1L)).thenReturn(offering); Mockito.when(physicalNetworkDao.findById(Mockito.anyLong())).thenReturn(phyNet); Mockito.when(dcDao.findById(Mockito.anyLong())).thenReturn(dc); - Mockito.lenient().doNothing().when(accountManager).checkAccess(accountMock, networkOffering, dc); Mockito.when(accountManager.isRootAdmin(accountMock.getId())).thenReturn(true); } @@ -430,7 +440,6 @@ public class NetworkServiceImplTest { Mockito.when(dc.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); Map networkProvidersMap = new HashMap(); Mockito.when(networkManager.finalizeServicesAndProvidersForNetwork(ArgumentMatchers.any(NetworkOffering.class), anyLong())).thenReturn(networkProvidersMap); - lenient().doNothing().when(alertManager).sendAlert(any(AlertService.AlertType.class), anyLong(), anyLong(), anyString(), anyString()); Mockito.when(configMgr.isOfferingForVpc(offering)).thenReturn(false); 
Mockito.when(offering.isInternalLb()).thenReturn(false); @@ -439,7 +448,7 @@ public class NetworkServiceImplTest { null, null, false, null, accountMock, null, phyNet, 1L, null, null, null, null, null, true, null, null, null, null, null, - null, null, null, null, new Pair<>(1500, privateMtu)); + null, null, null, null, new Pair<>(1500, privateMtu), null); } @Test public void testValidateMtuConfigWhenMtusExceedThreshold() { @@ -488,7 +497,7 @@ public class NetworkServiceImplTest { Mockito.verify(vpcMgr, times(1)).createVpcGuestNetwork(1L, "testNetwork", "Test Network", null, null, null, null, accountMock, null, phyNet, 1L, null, null, 1L, null, accountMock, - true, null, null, null, null, null, null, null, new Pair<>(0, 1000)); + true, null, null, null, null, null, null, null, new Pair<>(0, 1000), null); } @@ -551,7 +560,7 @@ public class NetworkServiceImplTest { private void prepareCreateNetworkDnsMocks(CreateNetworkCmd cmd, Network.GuestType guestType, boolean ipv6, boolean isVpc, boolean dnsServiceSupported) { long networkOfferingId = 1L; Mockito.when(cmd.getNetworkOfferingId()).thenReturn(networkOfferingId); - NetworkOfferingVO networkOfferingVO = Mockito.mock(NetworkOfferingVO.class); + networkOfferingVO = Mockito.mock(NetworkOfferingVO.class); Mockito.when(networkOfferingVO.getId()).thenReturn(networkOfferingId); Mockito.when(networkOfferingVO.getGuestType()).thenReturn(guestType); Mockito.when(networkOfferingDao.findById(networkOfferingId)).thenReturn(networkOfferingVO); @@ -598,6 +607,7 @@ public class NetworkServiceImplTest { CreateNetworkCmd cmd = Mockito.mock(CreateNetworkCmd.class); prepareCreateNetworkDnsMocks(cmd, Network.GuestType.Isolated, false, false, true); Mockito.when(cmd.getIp4Dns1()).thenReturn(ip4Dns[0]); + Mockito.when(cmd.getCidrSize()).thenReturn(null); try { service.createGuestNetwork(cmd); } catch (InsufficientCapacityException | ResourceAllocationException e) { @@ -742,6 +752,31 @@ public class NetworkServiceImplTest { 
Assert.assertNull(networkVO.getIp6Dns2()); } + @Test + public void testCreateIpv4RoutedNetwork() throws InsufficientCapacityException, ResourceAllocationException { + registerCallContext(); + CreateNetworkCmd cmd = Mockito.mock(CreateNetworkCmd.class); + Mockito.when(cmd.getCidrSize()).thenReturn(24); + prepareCreateNetworkDnsMocks(cmd, Network.GuestType.Isolated, false, false, true); + when(networkOfferingVO.getNetworkMode()).thenReturn(NetworkOffering.NetworkMode.ROUTED); + when(networkOfferingVO.getRoutingMode()).thenReturn(NetworkOffering.RoutingMode.Static); + when(routedIpv4Manager.isVirtualRouterGateway(networkOfferingVO)).thenReturn(true); + doNothing().when(routedIpv4Manager).assignIpv4SubnetToNetwork(nullable(Network.class)); + + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + when(cmd.getZoneId()).thenReturn(zoneId); + when(dcDao.findById(zoneId)).thenReturn(zone); + when(zone.getId()).thenReturn(zoneId); + + try { + service.createGuestNetwork(cmd); + } catch (InsufficientCapacityException | ResourceAllocationException e) { + Assert.fail(String.format("failure with exception: %s", e.getMessage())); + } + + Mockito.verify(routedIpv4Manager).assignIpv4SubnetToNetwork(nullable(Network.class)); + } + @Test public void testCheckAndUpdateNetworkResetSuccess() { NetworkVO networkVO = new NetworkVO(); diff --git a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java index aaf0f254d41..b391aeb9f07 100644 --- a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java @@ -42,6 +42,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.network.Network; +import com.cloud.network.NetworkModel; import com.cloud.network.as.dao.AutoScalePolicyConditionMapDao; import 
com.cloud.network.as.dao.AutoScalePolicyDao; import com.cloud.network.as.dao.AutoScaleVmGroupDao; @@ -122,6 +123,7 @@ import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.userdata.UserDataManager; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -138,6 +140,7 @@ import org.springframework.test.util.ReflectionTestUtils; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.LinkedHashMap; @@ -189,6 +192,9 @@ public class AutoScaleManagerImplTest { @Mock UserVmManager userVmMgr; + @Mock + UserDataManager userDataMgr; + @Mock EntityManager entityManager; @@ -351,6 +357,8 @@ public class AutoScaleManagerImplTest { @Mock NetworkVO networkMock; @Mock + NetworkModel networkModel; + @Mock NetworkOfferingVO networkOfferingMock; @Mock CounterVO counterMock; @@ -406,7 +414,7 @@ public class AutoScaleManagerImplTest { userDataDetails.put("0", new HashMap<>() {{ put("key1", "value1"); put("key2", "value2"); }}); Mockito.doReturn(userDataFinal).when(userVmMgr).finalizeUserData(any(), any(), any()); - Mockito.doReturn(userDataFinal).when(userVmMgr).validateUserData(eq(userDataFinal), nullable(BaseCmd.HTTPMethod.class)); + Mockito.doReturn(userDataFinal).when(userDataMgr).validateUserData(eq(userDataFinal), nullable(BaseCmd.HTTPMethod.class)); } @After @@ -760,7 +768,7 @@ public class AutoScaleManagerImplTest { Mockito.verify(autoScaleVmProfileDao).persist(Mockito.any()); Mockito.verify(userVmMgr).finalizeUserData(any(), any(), any()); - Mockito.verify(userVmMgr).validateUserData(eq(userDataFinal), nullable(BaseCmd.HTTPMethod.class)); + Mockito.verify(userDataMgr).validateUserData(eq(userDataFinal), 
nullable(BaseCmd.HTTPMethod.class)); } } @@ -821,7 +829,7 @@ public class AutoScaleManagerImplTest { Mockito.verify(autoScaleVmProfileDao).persist(Mockito.any()); Mockito.verify(userVmMgr).finalizeUserData(any(), any(), any()); - Mockito.verify(userVmMgr).validateUserData(eq(userDataFinal), nullable(BaseCmd.HTTPMethod.class)); + Mockito.verify(userDataMgr).validateUserData(eq(userDataFinal), nullable(BaseCmd.HTTPMethod.class)); } @Test @@ -1307,10 +1315,11 @@ public class AutoScaleManagerImplTest { when(userVmMock.getId()).thenReturn(virtualMachineId); when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced); - when(zoneMock.isSecurityGroupEnabled()).thenReturn(true); when(userVmService.createAdvancedSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any())).thenReturn(userVmMock); + when(networkModel.checkSecurityGroupSupportForNetwork(account, zoneMock, + List.of(networkId), Collections.emptyList())).thenReturn(true); long result = autoScaleManagerImplSpy.createNewVM(asVmGroupMock); @@ -1356,10 +1365,11 @@ public class AutoScaleManagerImplTest { when(userVmMock.getId()).thenReturn(virtualMachineId); when(zoneMock.getNetworkType()).thenReturn(DataCenter.NetworkType.Advanced); - when(zoneMock.isSecurityGroupEnabled()).thenReturn(false); when(userVmService.createAdvancedVirtualMachine(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), eq(userData), eq(userDataId), eq(userDataDetails.toString()), any(), any(), any(), eq(true), any(), any(), any(), any(), any(), any(), any(), eq(true), any(), any())).thenReturn(userVmMock); + when(networkModel.checkSecurityGroupSupportForNetwork(account, zoneMock, + List.of(networkId), Collections.emptyList())).thenReturn(false); long result = 
autoScaleManagerImplSpy.createNewVM(asVmGroupMock); diff --git a/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java b/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java index d83120d75f3..8c8dc33d7ec 100644 --- a/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java +++ b/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java @@ -61,6 +61,7 @@ import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; import com.google.common.collect.Maps; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; @@ -83,6 +84,7 @@ import org.mockito.junit.MockitoJUnitRunner; import java.lang.reflect.Field; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -148,6 +150,7 @@ public class ConfigDriveNetworkElementTest { @Mock private AgentManager agentManager; @Mock private CallContext callContextMock; @Mock private DomainVO domainVO; + @Mock private NetworkOrchestrationService _networkOrchestrationService; @Spy @InjectMocks private ConfigDriveNetworkElement _configDrivesNetworkElement = new ConfigDriveNetworkElement(); @@ -264,13 +267,9 @@ public class ConfigDriveNetworkElementTest { try (MockedStatic ignored1 = Mockito.mockStatic(ConfigDriveBuilder.class); MockedStatic ignored2 = Mockito.mockStatic(CallContext.class)) { Mockito.when(CallContext.current()).thenReturn(callContextMock); Mockito.doReturn(Mockito.mock(Account.class)).when(callContextMock).getCallingAccount(); - Mockito.when(ConfigDriveBuilder.buildConfigDrive(Mockito.anyList(), Mockito.anyString(), Mockito.anyString(), 
Mockito.anyMap())).thenReturn("content"); final HandleConfigDriveIsoAnswer answer = mock(HandleConfigDriveIsoAnswer.class); final UserVmDetailVO userVmDetailVO = mock(UserVmDetailVO.class); - when(agentManager.easySend(Mockito.anyLong(), Mockito.any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); - when(answer.getResult()).thenReturn(true); - when(answer.getConfigDriveLocation()).thenReturn(NetworkElement.Location.PRIMARY); when(network.getTrafficType()).thenReturn(Networks.TrafficType.Guest); when(virtualMachine.getUuid()).thenReturn("vm-uuid"); when(userVmDetailVO.getValue()).thenReturn(PUBLIC_KEY); @@ -288,6 +287,28 @@ public class ConfigDriveNetworkElementTest { profile.setConfigDriveLabel("testlabel"); assertTrue(_configDrivesNetworkElement.addPasswordAndUserdata( network, nicp, profile, deployDestination, null)); + } + } + + @Test + public void testCreateConfigDriveIso() throws Exception { + try (MockedStatic ignored1 = Mockito.mockStatic(ConfigDriveBuilder.class); MockedStatic ignored2 = Mockito.mockStatic(CallContext.class)) { + Mockito.when(CallContext.current()).thenReturn(callContextMock); + Mockito.when(ConfigDriveBuilder.buildConfigDrive(Mockito.anyList(), Mockito.anyList(), Mockito.anyString(), Mockito.anyString(), Mockito.anyMap(), Mockito.anyMap())).thenReturn("content"); + + final HandleConfigDriveIsoAnswer answer = mock(HandleConfigDriveIsoAnswer.class); + when(agentManager.easySend(Mockito.anyLong(), Mockito.any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); + when(answer.getResult()).thenReturn(true); + when(answer.getConfigDriveLocation()).thenReturn(NetworkElement.Location.PRIMARY); + when(virtualMachine.getUuid()).thenReturn("vm-uuid"); + + Map parms = Maps.newHashMap(); + parms.put(VirtualMachineProfile.Param.VmPassword, PASSWORD); + parms.put(VirtualMachineProfile.Param.VmSshPubKey, PUBLIC_KEY); + VirtualMachineProfile profile = new VirtualMachineProfileImpl(virtualMachine, null, serviceOfferingVO, null, parms); + 
profile.setConfigDriveLabel("testlabel"); + profile.setVmData(Collections.emptyList()); + assertTrue(_configDrivesNetworkElement.createConfigDriveIso(nicp, profile, deployDestination, null)); ArgumentCaptor commandCaptor = ArgumentCaptor.forClass(HandleConfigDriveIsoCommand.class); verify(agentManager, times(1)).easySend(Mockito.anyLong(), commandCaptor.capture()); diff --git a/server/src/test/java/com/cloud/network/rules/BgpPeersRulesTest.java b/server/src/test/java/com/cloud/network/rules/BgpPeersRulesTest.java new file mode 100644 index 00000000000..49067f265c9 --- /dev/null +++ b/server/src/test/java/com/cloud/network/rules/BgpPeersRulesTest.java @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License.You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied.See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.network.rules; + +import com.cloud.dc.DataCenter; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.network.router.VirtualRouter; + +import org.apache.cloudstack.network.BgpPeer; +import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@RunWith(MockitoJUnitRunner.class) +public class BgpPeersRulesTest { + + private BgpPeersRules bgpPeersRules; + private Network mockNetwork; + private List mockBgpPeers; + private NetworkTopologyVisitor mockVisitor; + private VirtualRouter mockRouter; + + @Before + public void setUp() { + mockNetwork = mock(Network.class); + BgpPeer peer1 = mock(BgpPeer.class); + BgpPeer peer2 = mock(BgpPeer.class); + mockBgpPeers = Arrays.asList(peer1, peer2); + + mockVisitor = mock(NetworkTopologyVisitor.class); + mockRouter = mock(VirtualRouter.class); + + bgpPeersRules = new BgpPeersRules(mockBgpPeers, mockNetwork); + } + + @Test + public void testGetBgpPeers() { + List bgpPeers = bgpPeersRules.getBgpPeers(); + assertNotNull(bgpPeers); + assertEquals(2, bgpPeers.size()); + assertTrue(bgpPeers.containsAll(mockBgpPeers)); + } + + @Test + public void testAccept() throws ResourceUnavailableException { + when(mockVisitor.visit(bgpPeersRules)).thenReturn(true); + + boolean result = bgpPeersRules.accept(mockVisitor, mockRouter); + + assertTrue(result); + verify(mockVisitor, times(1)).visit(bgpPeersRules); + } + + @Test + public void 
testAcceptThrowsResourceUnavailableException() throws ResourceUnavailableException { + when(mockVisitor.visit(bgpPeersRules)).thenThrow(new ResourceUnavailableException("Resource Unavailable", DataCenter.class, 1L)); + + ResourceUnavailableException thrown = assertThrows(ResourceUnavailableException.class, () -> { + bgpPeersRules.accept(mockVisitor, mockRouter); + }); + + assertEquals("Resource [DataCenter:1] is unreachable: Resource Unavailable", thrown.getMessage()); + assertEquals(DataCenter.class, thrown.getScope()); + assertEquals(1L, thrown.getResourceId()); + + verify(mockVisitor, times(1)).visit(bgpPeersRules); + } +} diff --git a/server/src/test/java/com/cloud/network/vpc/VpcManagerImplTest.java b/server/src/test/java/com/cloud/network/vpc/VpcManagerImplTest.java index deffb165f29..54acaa58acc 100644 --- a/server/src/test/java/com/cloud/network/vpc/VpcManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/vpc/VpcManagerImplTest.java @@ -73,6 +73,7 @@ import org.apache.cloudstack.api.command.user.vpc.UpdateVPCCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -98,10 +99,14 @@ import static org.junit.Assert.assertNotNull; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; 
@RunWith(MockitoJUnitRunner.class) public class VpcManagerImplTest { @@ -155,6 +160,8 @@ public class VpcManagerImplTest { FirewallRulesDao firewallDao; @Mock NetworkACLVO networkACLVOMock; + @Mock + RoutedIpv4Manager routedIpv4Manager; public static final long ACCOUNT_ID = 1; private AccountVO account; @@ -179,6 +186,8 @@ public class VpcManagerImplTest { private AutoCloseable closeable; + private VpcOfferingVO vpcOfferingVO; + private void registerCallContext() { account = new AccountVO("testaccount", 1L, "networkdomain", Account.Type.NORMAL, "uuid"); account.setId(ACCOUNT_ID); @@ -212,6 +221,7 @@ public class VpcManagerImplTest { manager._ntwkSvc = networkServiceMock; manager._firewallDao = firewallDao; manager._networkAclDao = networkACLDaoMock; + manager.routedIpv4Manager = routedIpv4Manager; CallContext.register(Mockito.mock(User.class), Mockito.mock(Account.class)); registerCallContext(); overrideDefaultConfigValue(NetworkService.AllowUsersToSpecifyVRMtu, "_defaultValue", "false"); @@ -365,13 +375,13 @@ public class VpcManagerImplTest { manager.createVpcGuestNetwork(1L, "vpcNet1", "vpc tier 1", null, "10.10.10.0/24", null, null, accountMock, null, physicalNetwork, 1L, null, null, 1L, null, accountMock, - true, null, null, null, null, null, null, null, new Pair<>(1000, 1000)); + true, null, null, null, null, null, null, null, new Pair<>(1000, 1000), null); Mockito.verify(networkMgr, times(1)).createGuestNetwork(1L, "vpcNet1", "vpc tier 1", null, "10.10.10.0/24", null, false, "cs1cloud.internal", accountMock, null, physicalNetwork, zoneId, null, null, 1L, null, null, true, null, null, null, null, - null, null, null, null, null, new Pair<>(1000, 1000)); + null, null, null, null, null, new Pair<>(1000, 1000), null); } @Test @@ -462,7 +472,7 @@ public class VpcManagerImplTest { private void mockVpcDnsResources(boolean supportDnsService, boolean isIpv6) { Mockito.when(accountManager.getAccount(vpcOwnerId)).thenReturn(account); - VpcOfferingVO vpcOfferingVO = 
Mockito.mock(VpcOfferingVO.class); + vpcOfferingVO = Mockito.mock(VpcOfferingVO.class); Mockito.when(vpcOfferingVO.getId()).thenReturn(vpcOfferingId); Mockito.when(vpcOfferingVO.getState()).thenReturn(VpcOffering.State.Enabled); Mockito.when(vpcOfferingDao.findById(vpcOfferingId)).thenReturn(vpcOfferingVO); @@ -479,7 +489,7 @@ public class VpcManagerImplTest { try { doNothing().when(resourceLimitService).checkResourceLimit(account, Resource.ResourceType.vpc); manager.createVpc(zoneId, vpcOfferingId, vpcOwnerId, vpcName, vpcName, ip4Cidr, vpcDomain, - ip4Dns[0], null, null, null, true, 1500); + ip4Dns[0], null, null, null, true, 1500, null, null, null); } catch (ResourceAllocationException e) { Assert.fail(String.format("failure with exception: %s", e.getMessage())); } @@ -491,12 +501,46 @@ public class VpcManagerImplTest { try { doNothing().when(resourceLimitService).checkResourceLimit(account, Resource.ResourceType.vpc); manager.createVpc(zoneId, vpcOfferingId, vpcOwnerId, vpcName, vpcName, ip4Cidr, vpcDomain, - ip4Dns[0], ip4Dns[1], ip6Dns[0], null, true, 1500); + ip4Dns[0], ip4Dns[1], ip6Dns[0], null, true, 1500, null, null, null); } catch (ResourceAllocationException e) { Assert.fail(String.format("failure with exception: %s", e.getMessage())); } } + @Test + public void testCreateVpc() { + mockVpcDnsResources(true, false); + VpcVO vpc = Mockito.mock(VpcVO.class); + Mockito.when(vpcDao.persist(any(), anyMap())).thenReturn(vpc); + Mockito.when(vpc.getUuid()).thenReturn("uuid"); + try { + doNothing().when(resourceLimitService).checkResourceLimit(account, Resource.ResourceType.vpc); + manager.createVpc(zoneId, vpcOfferingId, vpcOwnerId, vpcName, vpcName, ip4Cidr, vpcDomain, + ip4Dns[0], ip4Dns[1], null, null, true, 1500, null, null, null); + } catch (ResourceAllocationException e) { + Assert.fail(String.format("failure with exception: %s", e.getMessage())); + } + } + + @Test + public void testCreateRoutedVpc() { + mockVpcDnsResources(true, false); + VpcVO vpc = 
Mockito.mock(VpcVO.class); + Mockito.when(vpcDao.persist(any(), anyMap())).thenReturn(vpc); + Mockito.when(vpc.getUuid()).thenReturn("uuid"); + doReturn(true).when(routedIpv4Manager).isRoutedVpc(any()); + doNothing().when(routedIpv4Manager).getOrCreateIpv4SubnetForVpc(any(), anyString()); + try { + doNothing().when(resourceLimitService).checkResourceLimit(account, Resource.ResourceType.vpc); + manager.createVpc(zoneId, vpcOfferingId, vpcOwnerId, vpcName, vpcName, ip4Cidr, vpcDomain, + ip4Dns[0], ip4Dns[1], null, null, true, 1500, null, null, null); + } catch (ResourceAllocationException e) { + Assert.fail(String.format("failure with exception: %s", e.getMessage())); + } + + verify(routedIpv4Manager).getOrCreateIpv4SubnetForVpc(any(), anyString()); + } + @Test public void validateVpcPrivateGatewayAclIdTestNullAclVoThrowsInvalidParameterValueException() { Mockito.doReturn(null).when(networkACLDaoMock).findById(aclId); diff --git a/server/src/test/java/com/cloud/projects/ProjectManagerImplTest.java b/server/src/test/java/com/cloud/projects/ProjectManagerImplTest.java index 94dffd9fe8e..b9b568facc2 100644 --- a/server/src/test/java/com/cloud/projects/ProjectManagerImplTest.java +++ b/server/src/test/java/com/cloud/projects/ProjectManagerImplTest.java @@ -16,20 +16,27 @@ // under the License. 
package com.cloud.projects; -import com.cloud.projects.dao.ProjectDao; +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.webhook.WebhookHelper; +import org.apache.commons.collections.CollectionUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import java.util.ArrayList; -import java.util.List; +import com.cloud.projects.dao.ProjectDao; +import com.cloud.utils.component.ComponentContext; @RunWith(MockitoJUnitRunner.class) @@ -94,4 +101,31 @@ public class ProjectManagerImplTest { public void testUpdateProjectNameAndDisplayTextUpdateNameDisplayText() { runUpdateProjectNameAndDisplayTextTest(true, true); } + + @Test + public void testDeleteWebhooksForAccount() { + try (MockedStatic mockedComponentContext = Mockito.mockStatic(ComponentContext.class)) { + WebhookHelper webhookHelper = Mockito.mock(WebhookHelper.class); + List webhooks = List.of(Mockito.mock(ControlledEntity.class), + Mockito.mock(ControlledEntity.class)); + Mockito.doReturn(webhooks).when(webhookHelper).listWebhooksByAccount(Mockito.anyLong()); + mockedComponentContext.when(() -> ComponentContext.getDelegateComponentOfType(WebhookHelper.class)) + .thenReturn(webhookHelper); + Project project = Mockito.mock(Project.class); + Mockito.when(project.getProjectAccountId()).thenReturn(1L); + List result = projectManager.listWebhooksForProject(project); + Assert.assertEquals(2, result.size()); + } + } + + @Test + public void testDeleteWebhooksForAccountNoBean() { + try (MockedStatic mockedComponentContext = Mockito.mockStatic(ComponentContext.class)) { + 
mockedComponentContext.when(() -> ComponentContext.getDelegateComponentOfType(WebhookHelper.class)) + .thenThrow(NoSuchBeanDefinitionException.class); + List result = + projectManager.listWebhooksForProject(Mockito.mock(Project.class)); + Assert.assertTrue(CollectionUtils.isEmpty(result)); + } + } } diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index c38cfc3b832..6aae7a091d3 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -431,6 +431,17 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana return null; } + @Override + public List listAllHostsInOneZoneNotInClusterByHypervisor(HypervisorType type, long dcId, long clusterId) { + return null; + } + + @Override + public List listAllHostsInOneZoneNotInClusterByHypervisors(List types, long dcId, long clusterId) { + // TODO Auto-generated method stub + return null; + } + /* (non-Javadoc) * @see com.cloud.resource.ResourceManager#listAvailHypervisorInZone(java.lang.Long, java.lang.Long) */ @@ -590,6 +601,12 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana return null; } + @Override + public List listAllUpHostsInOneZoneByHypervisor(final HypervisorType type, final long dcId) { + // TODO Auto-generated method stub + return null; + } + @Override public List listAllUpAndEnabledHostsInOneZone(final long dcId) { // TODO Auto-generated method stub diff --git a/server/src/test/java/com/cloud/resourcelimit/CheckedReservationTest.java b/server/src/test/java/com/cloud/resourcelimit/CheckedReservationTest.java index ffd6063722f..247647dd010 100644 --- a/server/src/test/java/com/cloud/resourcelimit/CheckedReservationTest.java +++ b/server/src/test/java/com/cloud/resourcelimit/CheckedReservationTest.java @@ -24,7 +24,9 @@ import static 
org.mockito.Mockito.lenient; import static org.mockito.Mockito.when; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.reservation.ReservationVO; @@ -143,4 +145,41 @@ public class CheckedReservationTest { Assert.fail("Exception faced: " + e.getMessage()); } } + + @Test + public void testMultipleReservationsWithOneFailing() { + List tags = List.of("abc", "xyz"); + when(account.getAccountId()).thenReturn(1L); + when(account.getDomainId()).thenReturn(4L); + Map persistedReservations = new HashMap<>(); + Mockito.when(reservationDao.persist(Mockito.any(ReservationVO.class))).thenAnswer((Answer) invocation -> { + ReservationVO reservationVO = (ReservationVO) invocation.getArguments()[0]; + Long id = (long) (persistedReservations.size() + 1); + ReflectionTestUtils.setField(reservationVO, "id", id); + persistedReservations.put(id, reservationVO); + return reservationVO; + }); + Mockito.when(reservationDao.remove(Mockito.anyLong())).thenAnswer((Answer) invocation -> { + Long id = (Long) invocation.getArguments()[0]; + persistedReservations.remove(id); + return true; + }); + try { + Mockito.doThrow(ResourceAllocationException.class).when(resourceLimitService).checkResourceLimitWithTag(account, Resource.ResourceType.cpu, "xyz", 1L); + try (CheckedReservation vmReservation = new CheckedReservation(account, Resource.ResourceType.user_vm, tags, 1L, reservationDao, resourceLimitService); + CheckedReservation cpuReservation = new CheckedReservation(account, Resource.ResourceType.cpu, tags, 1L, reservationDao, resourceLimitService); + CheckedReservation memReservation = new CheckedReservation(account, Resource.ResourceType.memory, tags, 256L, reservationDao, resourceLimitService); + ) { + Assert.fail("Exception should have occurred but all reservations successful!"); + } catch (Exception ex) { + if (!(ex instanceof ResourceAllocationException)) { + 
Assert.fail(String.format("Expected ResourceAllocationException but %s occurred!", ex.getClass().getSimpleName())); + } + throw ex; + } + } catch (Exception rae) { + // Check if all persisted reservations are removed + Assert.assertTrue("All persisted reservations are not removed", persistedReservations.isEmpty()); + } + } } diff --git a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java index 3d31561f268..defcd09b174 100644 --- a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java +++ b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java @@ -190,6 +190,12 @@ public class ResourceLimitManagerImplTest extends TestCase { // update resource Limit for a domain for resource_type = 11 (Secondary storage (in GiB)) resourceLimitServiceCall(null, (long)1, 10, (long)400); + + // update resource Limit for an account for resource_type = 5 (Project) + resourceLimitServiceCall((long) 1, (long) 1, 5, (long) 50); + + // update resource Limit for a domain for resource_type = 5 (Project) + resourceLimitServiceCall(null, (long) 1, 5, (long) 100); } private void resourceLimitServiceCall(Long accountId, Long domainId, Integer resourceType, Long max) { @@ -413,6 +419,36 @@ public class ResourceLimitManagerImplTest extends TestCase { Assert.assertEquals(defaultAccountCpuMax, result); } + @Test + public void testFindCorrectResourceLimitForAccountProjects() { + AccountVO account = Mockito.mock(AccountVO.class); + Mockito.when(account.getId()).thenReturn(1L); + Mockito.when(accountManager.isRootAdmin(1L)).thenReturn(true); + + long result = resourceLimitManager.findCorrectResourceLimitForAccount(account, + Resource.ResourceType.project, hostTags.get(0)); + Assert.assertEquals(Resource.RESOURCE_UNLIMITED, result); + + Mockito.when(accountManager.isRootAdmin(1L)).thenReturn(false); + ResourceLimitVO limit = new ResourceLimitVO(); + 
limit.setMax(10L); + Mockito.when(resourceLimitDao.findByOwnerIdAndTypeAndTag(1L, Resource.ResourceOwnerType.Account, + Resource.ResourceType.project, hostTags.get(0))).thenReturn(limit); + result = resourceLimitManager.findCorrectResourceLimitForAccount(account, Resource.ResourceType.project, + hostTags.get(0)); + Assert.assertEquals(10L, result); + + long defaultAccountProjectsMax = 15L; + Map accountResourceLimitMap = new HashMap<>(); + accountResourceLimitMap.put(Resource.ResourceType.project.name(), defaultAccountProjectsMax); + resourceLimitManager.accountResourceLimitMap = accountResourceLimitMap; + Mockito.when(resourceLimitDao.findByOwnerIdAndTypeAndTag(1L, Resource.ResourceOwnerType.Account, + Resource.ResourceType.project, hostTags.get(0))).thenReturn(null); + result = resourceLimitManager.findCorrectResourceLimitForAccount(account, Resource.ResourceType.project, + hostTags.get(0)); + Assert.assertEquals(defaultAccountProjectsMax, result); + } + @Test public void testFindCorrectResourceLimitForAccountId1() { // long accountId = 1L; @@ -472,6 +508,68 @@ public class ResourceLimitManagerImplTest extends TestCase { Assert.assertEquals(defaultDomainCpuMax, result); } + @Test + public void testResourceUnlimitedForDomainProjects() { + DomainVO domain = Mockito.mock(DomainVO.class); + Mockito.when(domain.getId()).thenReturn(1L); + + long result = resourceLimitManager.findCorrectResourceLimitForDomain(domain, Resource.ResourceType.project, + hostTags.get(0)); + Assert.assertEquals(Resource.RESOURCE_UNLIMITED, result); + } + @Test + public void testSpecificLimitForDomainProjects() { + DomainVO domain = Mockito.mock(DomainVO.class); + Mockito.when(domain.getId()).thenReturn(2L); + + ResourceLimitVO limit = new ResourceLimitVO(); + limit.setMax(100L); + Mockito.when(resourceLimitDao.findByOwnerIdAndTypeAndTag(2L, Resource.ResourceOwnerType.Domain, Resource.ResourceType.project, hostTags.get(0))).thenReturn(limit); + + long result = 
resourceLimitManager.findCorrectResourceLimitForDomain(domain, Resource.ResourceType.project, hostTags.get(0)); + Assert.assertEquals(100L, result); + } + + @Test + public void testParentDomainLimitForDomainProjects() { + DomainVO domain = Mockito.mock(DomainVO.class); + Mockito.when(domain.getId()).thenReturn(3L); + + DomainVO parentDomain = Mockito.mock(DomainVO.class); + Mockito.when(domain.getParent()).thenReturn(5L); + Mockito.when(domainDao.findById(5L)).thenReturn(parentDomain); + + ResourceLimitVO limit = new ResourceLimitVO(); + limit.setMax(200L); + Mockito.when(resourceLimitDao.findByOwnerIdAndTypeAndTag(3L, Resource.ResourceOwnerType.Domain, + Resource.ResourceType.project, hostTags.get(0))).thenReturn(null); + Mockito.when(resourceLimitDao.findByOwnerIdAndTypeAndTag(5L, Resource.ResourceOwnerType.Domain, + Resource.ResourceType.project, hostTags.get(0))).thenReturn(limit); + + long result = resourceLimitManager.findCorrectResourceLimitForDomain(domain, Resource.ResourceType.project, + hostTags.get(0)); + Assert.assertEquals(200L, result); + } + + @Test + public void testDefaultDomainProjectLimit() { + DomainVO domain = Mockito.mock(DomainVO.class); + Mockito.when(domain.getId()).thenReturn(4L); + Mockito.when(domain.getParent()).thenReturn(null); + + long defaultDomainProjectsMax = 250L; + Map domainResourceLimitMap = new HashMap<>(); + domainResourceLimitMap.put(Resource.ResourceType.project.name(), defaultDomainProjectsMax); + resourceLimitManager.domainResourceLimitMap = domainResourceLimitMap; + + Mockito.when(resourceLimitDao.findByOwnerIdAndTypeAndTag(4L, Resource.ResourceOwnerType.Domain, + Resource.ResourceType.project, hostTags.get(0))).thenReturn(null); + + long result = resourceLimitManager.findCorrectResourceLimitForDomain(domain, Resource.ResourceType.project, + hostTags.get(0)); + Assert.assertEquals(defaultDomainProjectsMax, result); + } + @Test public void testCheckResourceLimitWithTag() { AccountVO account = 
Mockito.mock(AccountVO.class); diff --git a/server/src/test/java/com/cloud/server/StatsCollectorTest.java b/server/src/test/java/com/cloud/server/StatsCollectorTest.java index 1f6a35cfbae..2b2451c66c7 100644 --- a/server/src/test/java/com/cloud/server/StatsCollectorTest.java +++ b/server/src/test/java/com/cloud/server/StatsCollectorTest.java @@ -28,7 +28,7 @@ import com.cloud.user.VmDiskStatisticsVO; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VmStats; import com.cloud.vm.VmStatsVO; -import com.cloud.vm.dao.VmStatsDao; +import com.cloud.vm.dao.VmStatsDaoImpl; import com.google.gson.Gson; import com.tngtech.java.junit.dataprovider.DataProvider; import com.tngtech.java.junit.dataprovider.DataProviderRunner; @@ -81,7 +81,7 @@ public class StatsCollectorTest { private static final String DEFAULT_DATABASE_NAME = "cloudstack"; @Mock - VmStatsDao vmStatsDaoMock = Mockito.mock(VmStatsDao.class); + VmStatsDaoImpl vmStatsDaoMock; @Mock VmStatsEntry statsForCurrentIterationMock; @@ -304,7 +304,7 @@ public class StatsCollectorTest { statsCollector.cleanUpVirtualMachineStats(); - Mockito.verify(vmStatsDaoMock, Mockito.never()).removeAllByTimestampLessThan(Mockito.any()); + Mockito.verify(vmStatsDaoMock, Mockito.never()).removeAllByTimestampLessThan(Mockito.any(), Mockito.anyLong()); } @Test @@ -313,7 +313,7 @@ public class StatsCollectorTest { statsCollector.cleanUpVirtualMachineStats(); - Mockito.verify(vmStatsDaoMock).removeAllByTimestampLessThan(Mockito.any()); + Mockito.verify(vmStatsDaoMock).removeAllByTimestampLessThan(Mockito.any(), Mockito.anyLong()); } @Test diff --git a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java index 2910476c18e..fcbae4f339c 100644 --- a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java +++ b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java @@ -21,6 +21,24 @@ import java.util.Arrays; import 
java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.cloudstack.api.command.admin.storage.ChangeStoragePoolScopeCmd; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.exception.ConnectionException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.AccountManagerImpl; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.framework.config.ConfigDepot; @@ -28,6 +46,8 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.collections.MapUtils; import org.junit.Assert; @@ -39,27 +59,18 @@ import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; +import org.springframework.test.util.ReflectionTestUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Command; -import com.cloud.agent.api.StoragePoolInfo; import com.cloud.capacity.CapacityManager; -import com.cloud.dc.DataCenterVO; import 
com.cloud.dc.VsphereStoragePolicyVO; -import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VsphereStoragePolicyDao; import com.cloud.exception.AgentUnavailableException; -import com.cloud.exception.ConnectionException; -import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.StorageUnavailableException; -import com.cloud.host.Host; import com.cloud.hypervisor.HypervisorGuruManager; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.utils.Pair; import com.cloud.vm.DiskProfile; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.dao.VMInstanceDao; @RunWith(MockitoJUnitRunner.class) public class StorageManagerImplTest { @@ -87,11 +98,30 @@ public class StorageManagerImplTest { ConfigurationDao configurationDao; @Mock DataCenterDao dataCenterDao; + @Mock + AccountManagerImpl accountMgr; + @Mock + StoragePoolDetailsDao storagePoolDetailsDao; + + @Mock + ClusterDao clusterDao; @Spy @InjectMocks private StorageManagerImpl storageManagerImpl; + @Mock + private StoragePoolVO storagePoolVOMock; + + @Mock + private VolumeVO volume1VOMock; + + @Mock + private VolumeVO volume2VOMock; + + @Mock + private VMInstanceVO vmInstanceVOMock; + @Test public void createLocalStoragePoolName() { String hostMockName = "host1"; @@ -497,4 +527,233 @@ public class StorageManagerImplTest { .update(StorageManager.DataStoreDownloadFollowRedirects.key(),StorageManager.DataStoreDownloadFollowRedirects.defaultValue()); } + @Test + public void getStoragePoolNonDestroyedVolumesLogTestNonDestroyedVolumesReturnLog() { + Mockito.doReturn(1L).when(storagePoolVOMock).getId(); + Mockito.doReturn(1L).when(volume1VOMock).getInstanceId(); + Mockito.doReturn("786633d1-a942-4374-9d56-322dd4b0d202").when(volume1VOMock).getUuid(); + Mockito.doReturn(1L).when(volume2VOMock).getInstanceId(); + Mockito.doReturn("ffb46333-e983-4c21-b5f0-51c5877a3805").when(volume2VOMock).getUuid(); + 
Mockito.doReturn("58760044-928f-4c4e-9fef-d0e48423595e").when(vmInstanceVOMock).getUuid(); + + Mockito.when(_volumeDao.findByPoolId(storagePoolVOMock.getId(), null)).thenReturn(List.of(volume1VOMock, volume2VOMock)); + Mockito.doReturn(vmInstanceVOMock).when(vmInstanceDao).findById(Mockito.anyLong()); + + String log = storageManagerImpl.getStoragePoolNonDestroyedVolumesLog(storagePoolVOMock.getId()); + String expected = String.format("[Volume [%s] (attached to VM [%s]), Volume [%s] (attached to VM [%s])]", volume1VOMock.getUuid(), vmInstanceVOMock.getUuid(), volume2VOMock.getUuid(), vmInstanceVOMock.getUuid()); + + Assert.assertEquals(expected, log); + } + + private ChangeStoragePoolScopeCmd mockChangeStoragePooolScopeCmd(String newScope) { + ChangeStoragePoolScopeCmd cmd = new ChangeStoragePoolScopeCmd(); + ReflectionTestUtils.setField(cmd, "id", 1L); + ReflectionTestUtils.setField(cmd, "clusterId", 1L); + ReflectionTestUtils.setField(cmd, "scope", newScope); + return cmd; + } + + private StoragePoolVO mockStoragePoolVOForChangeStoragePoolScope(ScopeType currentScope, StoragePoolStatus status) { + StoragePoolVO primaryStorage = new StoragePoolVO(); + primaryStorage.setId(1L); + primaryStorage.setDataCenterId(1L); + primaryStorage.setClusterId(1L); + primaryStorage.setStatus(StoragePoolStatus.Disabled); + primaryStorage.setScope(currentScope); + primaryStorage.setStatus(status); + return primaryStorage; + } + + private void prepareTestChangeStoragePoolScope(ScopeType currentScope, StoragePoolStatus status) { + final DataCenterVO zone = new DataCenterVO(1L, null, null, null, null, null, null, null, null, null, DataCenter.NetworkType.Advanced, null, null); + StoragePoolVO primaryStorage = mockStoragePoolVOForChangeStoragePoolScope(currentScope, status); + + Mockito.when(accountMgr.isRootAdmin(Mockito.any())).thenReturn(true); + Mockito.when(dataCenterDao.findById(1L)).thenReturn(zone); + Mockito.when(storagePoolDao.findById(1L)).thenReturn(primaryStorage); + } + + 
@Test(expected = InvalidParameterValueException.class) + public void testChangeStoragePoolScopeNotDisabledException() { + prepareTestChangeStoragePoolScope(ScopeType.CLUSTER, StoragePoolStatus.Initialized); + + ChangeStoragePoolScopeCmd cmd = mockChangeStoragePooolScopeCmd("ZONE"); + storageManagerImpl.changeStoragePoolScope(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testChangeStoragePoolScopeToZoneHypervisorNotSupported() { + prepareTestChangeStoragePoolScope(ScopeType.CLUSTER, StoragePoolStatus.Disabled); + + final ClusterVO cluster = new ClusterVO(); + cluster.setHypervisorType(String.valueOf(HypervisorType.XenServer)); + Mockito.when(clusterDao.findById(1L)).thenReturn(cluster); + + ChangeStoragePoolScopeCmd cmd = mockChangeStoragePooolScopeCmd("ZONE"); + storageManagerImpl.changeStoragePoolScope(cmd); + } + + @Test(expected = CloudRuntimeException.class) + public void testChangeStoragePoolScopeToClusterVolumesPresentException() { + prepareTestChangeStoragePoolScope(ScopeType.ZONE, StoragePoolStatus.Disabled); + + final ClusterVO cluster = new ClusterVO(); + Mockito.when(clusterDao.findById(1L)).thenReturn(cluster); + + VMInstanceVO instance = Mockito.mock(VMInstanceVO.class); + Pair, Integer> vms = new Pair<>(List.of(instance), 1); + Mockito.when(vmInstanceDao.listByVmsNotInClusterUsingPool(1L, 1L)).thenReturn(vms); + + ChangeStoragePoolScopeCmd cmd = mockChangeStoragePooolScopeCmd("CLUSTER"); + storageManagerImpl.changeStoragePoolScope(cmd); + } + + @Test + public void testCheckNFSMountOptionsForCreateNoNFSMountOptions() { + Map details = new HashMap<>(); + try { + storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.XenServer, ""); + } catch (Exception e) { + Assert.fail(); + } + } + + @Test + public void testCheckNFSMountOptionsForCreateNotKVM() { + Map details = new HashMap<>(); + details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); + InvalidParameterValueException exception = 
Assert.assertThrows(InvalidParameterValueException.class, + () -> storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.XenServer, "")); + Assert.assertEquals(exception.getMessage(), "NFS options can not be set for the hypervisor type " + HypervisorType.XenServer); + } + + @Test + public void testCheckNFSMountOptionsForCreateNotNFS() { + Map details = new HashMap<>(); + details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); + InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + () -> storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.KVM, "")); + Assert.assertEquals(exception.getMessage(), "NFS options can only be set on pool type " + Storage.StoragePoolType.NetworkFilesystem); + } + + @Test + public void testCheckNFSMountOptionsForUpdateNoNFSMountOptions() { + Map details = new HashMap<>(); + StoragePoolVO pool = new StoragePoolVO(); + Long accountId = 1L; + try { + storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId); + } catch (Exception e) { + Assert.fail(); + } + } + + @Test + public void testCheckNFSMountOptionsForUpdateNotRootAdmin() { + Map details = new HashMap<>(); + StoragePoolVO pool = new StoragePoolVO(); + Long accountId = 1L; + details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); + Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(false); + PermissionDeniedException exception = Assert.assertThrows(PermissionDeniedException.class, + () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId)); + Assert.assertEquals(exception.getMessage(), "Only root admin can modify nfs options"); + } + + @Test + public void testCheckNFSMountOptionsForUpdateNotKVM() { + Map details = new HashMap<>(); + StoragePoolVO pool = new StoragePoolVO(); + Long accountId = 1L; + details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); + Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true); + 
pool.setHypervisor(HypervisorType.XenServer); + InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId)); + Assert.assertEquals(exception.getMessage(), "NFS options can only be set for the hypervisor type " + HypervisorType.KVM); + } + + @Test + public void testCheckNFSMountOptionsForUpdateNotNFS() { + Map details = new HashMap<>(); + StoragePoolVO pool = new StoragePoolVO(); + Long accountId = 1L; + details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); + Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true); + pool.setHypervisor(HypervisorType.KVM); + pool.setPoolType(Storage.StoragePoolType.FiberChannel); + InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId)); + Assert.assertEquals(exception.getMessage(), "NFS options can only be set on pool type " + Storage.StoragePoolType.NetworkFilesystem); + } + + @Test + public void testCheckNFSMountOptionsForUpdateNotMaintenance() { + Map details = new HashMap<>(); + StoragePoolVO pool = new StoragePoolVO(); + Long accountId = 1L; + details.put(ApiConstants.NFS_MOUNT_OPTIONS, "vers=4.1"); + Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true); + pool.setHypervisor(HypervisorType.KVM); + pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem); + pool.setStatus(StoragePoolStatus.Up); + InvalidParameterValueException exception = Assert.assertThrows(InvalidParameterValueException.class, + () -> storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId)); + Assert.assertEquals(exception.getMessage(), "The storage pool should be in maintenance mode to edit nfs options"); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDuplicateNFSMountOptions() { + String nfsMountOpts = "vers=4.1, 
nconnect=4,vers=4.2"; + Map details = new HashMap<>(); + details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts); + storageManagerImpl.checkNFSMountOptionsForCreate(details, HypervisorType.KVM, "nfs"); + } + + @Test(expected = InvalidParameterValueException.class) + public void testInvalidNFSMountOptions() { + String nfsMountOpts = "vers=4.1=2,"; + Map details = new HashMap<>(); + details.put(ApiConstants.NFS_MOUNT_OPTIONS, nfsMountOpts); + StoragePoolVO pool = new StoragePoolVO(); + pool.setHypervisor(HypervisorType.KVM); + pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem); + pool.setStatus(StoragePoolStatus.Maintenance); + Long accountId = 1L; + Mockito.when(accountMgr.isRootAdmin(accountId)).thenReturn(true); + storageManagerImpl.checkNFSMountOptionsForUpdate(details, pool, accountId); + } + + @Test + public void testGetStoragePoolMountOptionsNotNFS() { + StoragePoolVO pool = new StoragePoolVO(); + + pool.setPoolType(Storage.StoragePoolType.FiberChannel); + Pair, Boolean> details = storageManagerImpl.getStoragePoolNFSMountOpts(pool, null); + Assert.assertEquals(details.second(), false); + Assert.assertEquals(details.first(), null); + } + + @Test + public void testGetStoragePoolMountOptions() { + Long poolId = 1L; + String key = "nfsmountopts"; + String value = "vers=4.1,nconnect=2"; + StoragePoolDetailVO nfsMountOpts = new StoragePoolDetailVO(poolId, key, value, true); + StoragePoolVO pool = new StoragePoolVO(); + pool.setId(poolId); + pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem); + Mockito.when(storagePoolDetailsDao.findDetail(poolId, ApiConstants.NFS_MOUNT_OPTIONS)).thenReturn(nfsMountOpts); + + Pair, Boolean> details = storageManagerImpl.getStoragePoolNFSMountOpts(pool, null); + Assert.assertEquals(details.second(), true); + Assert.assertEquals(details.first().get(key), value); + } + + @Test + public void testGetStoragePoolMountFailureReason() { + String error = "Mount failed on kvm host. 
An incorrect mount option was specified.\nIncorrect mount option."; + String failureReason = storageManagerImpl.getStoragePoolMountFailureReason(error); + Assert.assertEquals(failureReason, "An incorrect mount option was specified"); + } } diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 043f62fc803..80835891327 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -44,6 +44,7 @@ import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -84,7 +85,6 @@ import org.mockito.junit.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; import com.cloud.api.query.dao.ServiceOfferingJoinDao; -import com.cloud.api.query.vo.ServiceOfferingJoinVO; import com.cloud.configuration.Resource; import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.DataCenterVO; @@ -145,6 +145,8 @@ public class VolumeApiServiceImplTest { @Mock private VolumeDao volumeDaoMock; @Mock + private BackupDao backupDaoMock; + @Mock private AccountManager accountManagerMock; @Mock private UserVmDao userVmDaoMock; @@ -474,44 +476,44 @@ public class VolumeApiServiceImplTest { // Negative test - try to attach non-root non-datadisk volume @Test(expected = InvalidParameterValueException.class) public void attachIncorrectDiskType() throws NoSuchFieldException, IllegalAccessException { - 
volumeApiServiceImpl.attachVolumeToVM(1L, 5L, 0L); + volumeApiServiceImpl.attachVolumeToVM(1L, 5L, 0L, false); } // Negative test - attach root volume to running vm @Test(expected = InvalidParameterValueException.class) public void attachRootDiskToRunningVm() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(1L, 6L, 0L); + volumeApiServiceImpl.attachVolumeToVM(1L, 6L, 0L, false); } // Negative test - attach root volume to non-xen vm @Test(expected = InvalidParameterValueException.class) public void attachRootDiskToHyperVm() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(3L, 6L, 0L); + volumeApiServiceImpl.attachVolumeToVM(3L, 6L, 0L, false); } // Negative test - attach root volume from the managed data store @Test(expected = InvalidParameterValueException.class) public void attachRootDiskOfManagedDataStore() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(2L, 7L, 0L); + volumeApiServiceImpl.attachVolumeToVM(2L, 7L, 0L, false); } // Negative test - root volume can't be attached to the vm already having a root volume attached @Test(expected = InvalidParameterValueException.class) public void attachRootDiskToVmHavingRootDisk() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(4L, 6L, 0L); + volumeApiServiceImpl.attachVolumeToVM(4L, 6L, 0L, false); } // Negative test - root volume in uploaded state can't be attached @Test(expected = InvalidParameterValueException.class) public void attachRootInUploadedState() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(2L, 8L, 0L); + volumeApiServiceImpl.attachVolumeToVM(2L, 8L, 0L, false); } // Positive test - attach ROOT volume in correct state, to the vm not having root volume attached @Test public void attachRootVolumePositive() throws NoSuchFieldException, IllegalAccessException { 
thrown.expect(NullPointerException.class); - volumeApiServiceImpl.attachVolumeToVM(2L, 6L, 0L); + volumeApiServiceImpl.attachVolumeToVM(2L, 6L, 0L, false); } // Negative test - attach data volume, to the vm on non-kvm hypervisor @@ -520,7 +522,7 @@ public class VolumeApiServiceImplTest { DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); when(diskOffering.getEncrypt()).thenReturn(true); when(_diskOfferingDao.findById(anyLong())).thenReturn(diskOffering); - volumeApiServiceImpl.attachVolumeToVM(2L, 10L, 1L); + volumeApiServiceImpl.attachVolumeToVM(2L, 10L, 1L, false); } // Positive test - attach data volume, to the vm on kvm hypervisor @@ -530,7 +532,7 @@ public class VolumeApiServiceImplTest { DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); when(diskOffering.getEncrypt()).thenReturn(true); when(_diskOfferingDao.findById(anyLong())).thenReturn(diskOffering); - volumeApiServiceImpl.attachVolumeToVM(4L, 10L, 1L); + volumeApiServiceImpl.attachVolumeToVM(4L, 10L, 1L, false); } // volume not Ready @@ -632,7 +634,7 @@ public class VolumeApiServiceImplTest { when(vm.getState()).thenReturn(State.Running); when(vm.getDataCenterId()).thenReturn(34L); when(vm.getBackupOfferingId()).thenReturn(null); - when(vm.getBackupVolumeList()).thenReturn(Collections.emptyList()); + when(backupDaoMock.listByVmId(anyLong(), anyLong())).thenReturn(Collections.emptyList()); when(volumeDaoMock.findByInstanceAndType(anyLong(), any(Volume.Type.class))).thenReturn(new ArrayList<>(10)); when(volumeDataFactoryMock.getVolume(9L)).thenReturn(volumeToAttach); when(volumeToAttach.getState()).thenReturn(Volume.State.Uploaded); @@ -640,7 +642,7 @@ public class VolumeApiServiceImplTest { when(_dcDao.findById(anyLong())).thenReturn(zoneWithDisabledLocalStorage); when(zoneWithDisabledLocalStorage.isLocalStorageEnabled()).thenReturn(true); try { - volumeApiServiceImpl.attachVolumeToVM(2L, 9L, null); + volumeApiServiceImpl.attachVolumeToVM(2L, 9L, null, false); } catch 
(InvalidParameterValueException e) { Assert.assertEquals(e.getMessage(), ("primary storage resource limit check failed")); } @@ -1363,10 +1365,8 @@ public class VolumeApiServiceImplTest { when(volume.getTemplateId()).thenReturn(1l); DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); - - ServiceOfferingJoinVO serviceOfferingJoinVO = Mockito.mock(ServiceOfferingJoinVO.class); - when(serviceOfferingJoinVO.getRootDiskSize()).thenReturn(rootDisk); - when(serviceOfferingJoinDao.findById(anyLong())).thenReturn(serviceOfferingJoinVO); + when(diskOffering.isComputeOnly()).thenReturn(true); + when(diskOffering.getDiskSize()).thenReturn(rootDisk); VMTemplateVO template = Mockito.mock(VMTemplateVO.class); when(template.getFormat()).thenReturn(imageFormat); diff --git a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerTest.java b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerTest.java index 74b31283d9d..28903c72cc3 100755 --- a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerTest.java +++ b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerTest.java @@ -27,13 +27,20 @@ import static org.mockito.Mockito.when; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; +import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; +import com.cloud.api.ApiDBUtils; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.storage.Storage; +import org.apache.cloudstack.api.command.user.snapshot.ExtractSnapshotCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import 
org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; @@ -49,6 +56,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -176,6 +184,16 @@ public class SnapshotManagerTest { @Mock DataCenterDao dataCenterDao; + MockedStatic apiDBUtilsMock; + @Mock + ExtractSnapshotCmd extractSnapshotCmdMock; + @Mock + DataCenterVO dataCenterVOMock; + @Mock + ImageStoreEntity imageStoreEntityMock; + @Mock + DataStoreManager dataStoreManagerMock; + SnapshotPolicyVO snapshotPolicyVoInstance; List listIntervalTypes = Arrays.asList(DateUtil.IntervalType.values()); @@ -191,6 +209,11 @@ public class SnapshotManagerTest { private static final int TEST_SNAPSHOT_POLICY_MAX_SNAPS = 1; private static final boolean TEST_SNAPSHOT_POLICY_DISPLAY = true; private static final boolean TEST_SNAPSHOT_POLICY_ACTIVE = true; + private static final long TEST_ZONE_ID = 7L; + private static final long TEST_SNAPSHOTDATASTORE_ID = 7L; + private static final String TEST_EXTRACT_URL = "extractUrl"; + private static final String TEST_SNAPSHOT_PATH = "path"; + private static final Storage.ImageFormat TEST_VOLUME_FORMAT = Storage.ImageFormat.RAW; @Before public void setup() throws ResourceAllocationException { @@ -228,10 +251,13 @@ public class SnapshotManagerTest { snapshotPolicyVoInstance = new SnapshotPolicyVO(TEST_VOLUME_ID, TEST_SNAPSHOT_POLICY_SCHEDULE, TEST_SNAPSHOT_POLICY_TIMEZONE, TEST_SNAPSHOT_POLICY_INTERVAL, TEST_SNAPSHOT_POLICY_MAX_SNAPS, TEST_SNAPSHOT_POLICY_DISPLAY); + + apiDBUtilsMock = Mockito.mockStatic(ApiDBUtils.class); } @After public void tearDown() throws Exception { + apiDBUtilsMock.close(); 
CallContext.unregister(); } @@ -533,4 +559,108 @@ public class SnapshotManagerTest { mockForBackupSnapshotToSecondaryZoneTest(true, DataCenter.Type.Edge); Assert.assertFalse(_snapshotMgr.isBackupSnapshotToSecondaryForZone(1L)); } + + private void mockForExtractSnapshotTests() { + Mockito.doReturn(TEST_SNAPSHOT_ID).when(extractSnapshotCmdMock).getId(); + Mockito.doReturn(TEST_ZONE_ID).when(extractSnapshotCmdMock).getZoneId(); + Mockito.doReturn(false).when(_accountMgr).isRootAdmin(Mockito.anyLong()); + Mockito.when(ApiDBUtils.isExtractionDisabled()).thenReturn(false); + + Mockito.doReturn(dataCenterVOMock).when(dataCenterDao).findById(TEST_ZONE_ID); + + List dataStores = new ArrayList<>(); + dataStores.add(imageStoreEntityMock); + Mockito.doReturn(dataStores).when(dataStoreManagerMock).getImageStoresByScope(Mockito.any()); + Mockito.doReturn(TEST_STORAGE_POOL_ID).when(imageStoreEntityMock).getId(); + + Mockito.doReturn(snapshotStoreMock).when(snapshotStoreDao).findByStoreSnapshot(DataStoreRole.Image, TEST_STORAGE_POOL_ID, TEST_SNAPSHOT_ID); + + Mockito.doReturn(snapshotInfoMock).when(snapshotFactory).getSnapshot(TEST_SNAPSHOT_ID, imageStoreEntityMock); + Mockito.doReturn(TEST_SNAPSHOT_PATH).when(snapshotInfoMock).getPath(); + Mockito.doReturn(volumeInfoMock).when(snapshotInfoMock).getBaseVolume(); + Mockito.doReturn(TEST_VOLUME_FORMAT).when(volumeInfoMock).getFormat(); + + Mockito.doReturn(TEST_SNAPSHOTDATASTORE_ID).when(snapshotStoreMock).getId(); + Mockito.doReturn(TEST_EXTRACT_URL).when(imageStoreEntityMock).createEntityExtractUrl(TEST_SNAPSHOT_PATH, TEST_VOLUME_FORMAT, snapshotInfoMock); + } + + @Test(expected = PermissionDeniedException.class) + public void extractSnapshotTestNotRootAdminDisabledExtractionReturnException() { + mockForExtractSnapshotTests(); + Mockito.when(ApiDBUtils.isExtractionDisabled()).thenReturn(true); + + _snapshotMgr.extractSnapshot(extractSnapshotCmdMock); + } + + @Test(expected = InvalidParameterValueException.class) + public void 
extractSnapshotTestNullSnapshotReturnException() { + mockForExtractSnapshotTests(); + Mockito.doReturn(null).when(_snapshotDao).findById(TEST_SNAPSHOT_ID); + + _snapshotMgr.extractSnapshot(extractSnapshotCmdMock); + } + + @Test(expected = InvalidParameterValueException.class) + public void extractSnapshotTestRemovedSnapshotReturnException() { + mockForExtractSnapshotTests(); + Mockito.doReturn(Mockito.mock(Date.class)).when(snapshotMock).getRemoved(); + Mockito.doReturn(snapshotMock).when(_snapshotDao).findById(TEST_SNAPSHOT_ID); + + _snapshotMgr.extractSnapshot(extractSnapshotCmdMock); + } + + @Test(expected = IllegalArgumentException.class) + public void extractSnapshotTestNullDataCenterReturnException() { + mockForExtractSnapshotTests(); + Mockito.doReturn(null).when(dataCenterDao).findById(TEST_ZONE_ID); + + _snapshotMgr.extractSnapshot(extractSnapshotCmdMock); + } + + @Test(expected = InvalidParameterValueException.class) + public void extractSnapshotTestNoZoneStoragesReturnException() { + mockForExtractSnapshotTests(); + Mockito.doReturn(Collections.emptyList()).when(dataStoreManagerMock).getImageStoresByScope(Mockito.any()); + + _snapshotMgr.extractSnapshot(extractSnapshotCmdMock); + } + + @Test() + public void extractSnapshotTestExistingExtractUrlReturnUrl() { + mockForExtractSnapshotTests(); + String extractUrl = "extractUrl"; + Mockito.doReturn(extractUrl).when(snapshotStoreMock).getExtractUrl(); + + Assert.assertEquals(extractUrl, _snapshotMgr.extractSnapshot(extractSnapshotCmdMock)); + Mockito.verify(snapshotSrv, Mockito.never()).syncVolumeSnapshotsToRegionStore(Mockito.anyLong(), Mockito.any()); + Mockito.verify(snapshotStoreDao, Mockito.never()).update(Mockito.anyLong(), Mockito.any()); + } + + @Test(expected = InvalidParameterValueException.class) + public void extractSnapshotTestNullSnapshotStoreReturnException() { + mockForExtractSnapshotTests(); + Mockito.doReturn(null).when(snapshotStoreDao).findByStoreSnapshot(DataStoreRole.Image, 
TEST_STORAGE_POOL_ID, TEST_SNAPSHOT_ID); + + _snapshotMgr.extractSnapshot(extractSnapshotCmdMock); + } + + @Test() + public void extractSnapshotTestCreateExtractUrlReturnUrl() { + mockForExtractSnapshotTests(); + + Assert.assertEquals(TEST_EXTRACT_URL, _snapshotMgr.extractSnapshot(extractSnapshotCmdMock)); + Mockito.verify(snapshotSrv).syncVolumeSnapshotsToRegionStore(TEST_VOLUME_ID, imageStoreEntityMock); + Mockito.verify(snapshotStoreDao).update(TEST_SNAPSHOTDATASTORE_ID, snapshotStoreMock); + } + + @Test() + public void extractSnapshotTestRootAdminDisabledExtractionCreateExtractUrlReturnUrl() { + mockForExtractSnapshotTests(); + Mockito.doReturn(true).when(_accountMgr).isRootAdmin(Mockito.anyLong()); + Mockito.when(ApiDBUtils.isExtractionDisabled()).thenReturn(true); + + Assert.assertEquals(TEST_EXTRACT_URL, _snapshotMgr.extractSnapshot(extractSnapshotCmdMock)); + Mockito.verify(snapshotSrv).syncVolumeSnapshotsToRegionStore(TEST_VOLUME_ID, imageStoreEntityMock); + Mockito.verify(snapshotStoreDao).update(TEST_SNAPSHOTDATASTORE_ID, snapshotStoreMock); + } } diff --git a/server/src/test/java/com/cloud/storage/snapshot/SnapshotSchedulerImplTest.java b/server/src/test/java/com/cloud/storage/snapshot/SnapshotSchedulerImplTest.java index 971af289ef7..3827531891f 100644 --- a/server/src/test/java/com/cloud/storage/snapshot/SnapshotSchedulerImplTest.java +++ b/server/src/test/java/com/cloud/storage/snapshot/SnapshotSchedulerImplTest.java @@ -26,6 +26,9 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; +import org.apache.cloudstack.framework.jobs.dao.AsyncJobDao; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; +import org.apache.cloudstack.jobs.JobInfo; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -65,6 +68,16 @@ public class SnapshotSchedulerImplTest { @Mock AccountVO accountVoMock; + @Mock + private SnapshotScheduleVO 
snapshotScheduleVoMock; + + @Mock + private AsyncJobDao asyncJobDaoMock; + + @Mock + private AsyncJobVO asyncJobVoMock; + + @Test public void scheduleNextSnapshotJobTestParameterIsNullReturnNull() { SnapshotScheduleVO snapshotScheduleVO = null; @@ -215,4 +228,50 @@ public class SnapshotSchedulerImplTest { Mockito.verify(snapshotScheduleDaoMock, Mockito.never()).remove(Mockito.anyLong()); } + + @Test + public void scheduleNextSnapshotJobIfNecessaryTestAsyncJobIsNullThenScheduleNextSnapshot() { + Mockito.doReturn(1L).when(snapshotScheduleVoMock).getAsyncJobId(); + Mockito.doReturn(null).when(asyncJobDaoMock).findByIdIncludingRemoved(Mockito.any()); + Mockito.doReturn(new Date()).when(snapshotSchedulerImplSpy).scheduleNextSnapshotJob(Mockito.any(SnapshotScheduleVO.class)); + + snapshotSchedulerImplSpy.scheduleNextSnapshotJobIfNecessary(snapshotScheduleVoMock); + + Mockito.verify(snapshotSchedulerImplSpy).scheduleNextSnapshotJob(Mockito.any(SnapshotScheduleVO.class)); + } + + @Test + public void scheduleNextSnapshotJobIfNecessaryTestAsyncJobSucceededThenScheduleNextSnapshot() { + Mockito.doReturn(1L).when(snapshotScheduleVoMock).getAsyncJobId(); + Mockito.doReturn(asyncJobVoMock).when(asyncJobDaoMock).findByIdIncludingRemoved(Mockito.any()); + Mockito.doReturn(JobInfo.Status.SUCCEEDED).when(asyncJobVoMock).getStatus(); + Mockito.doReturn(new Date()).when(snapshotSchedulerImplSpy).scheduleNextSnapshotJob(Mockito.any(SnapshotScheduleVO.class)); + + snapshotSchedulerImplSpy.scheduleNextSnapshotJobIfNecessary(snapshotScheduleVoMock); + + Mockito.verify(snapshotSchedulerImplSpy).scheduleNextSnapshotJob(Mockito.any(SnapshotScheduleVO.class)); + } + + @Test + public void scheduleNextSnapshotJobIfNecessaryTestAsyncJobFailedThenScheduleNextSnapshot() { + Mockito.doReturn(1L).when(snapshotScheduleVoMock).getAsyncJobId(); + Mockito.doReturn(asyncJobVoMock).when(asyncJobDaoMock).findByIdIncludingRemoved(Mockito.any()); + 
Mockito.doReturn(JobInfo.Status.FAILED).when(asyncJobVoMock).getStatus(); + Mockito.doReturn(new Date()).when(snapshotSchedulerImplSpy).scheduleNextSnapshotJob(Mockito.any(SnapshotScheduleVO.class)); + + snapshotSchedulerImplSpy.scheduleNextSnapshotJobIfNecessary(snapshotScheduleVoMock); + + Mockito.verify(snapshotSchedulerImplSpy).scheduleNextSnapshotJob(Mockito.any(SnapshotScheduleVO.class)); + } + + @Test + public void scheduleNextSnapshotJobIfNecessaryTestAsyncJobInProgressThenDoNothing() { + Mockito.doReturn(1L).when(snapshotScheduleVoMock).getAsyncJobId(); + Mockito.doReturn(asyncJobVoMock).when(asyncJobDaoMock).findByIdIncludingRemoved(Mockito.any()); + Mockito.doReturn(JobInfo.Status.IN_PROGRESS).when(asyncJobVoMock).getStatus(); + + snapshotSchedulerImplSpy.scheduleNextSnapshotJobIfNecessary(snapshotScheduleVoMock); + + Mockito.verify(snapshotSchedulerImplSpy, Mockito.never()).scheduleNextSnapshotJob(Mockito.any(SnapshotScheduleVO.class)); + } } diff --git a/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java b/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java index d90ae5a2f03..5307beb4aba 100644 --- a/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java +++ b/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java @@ -18,25 +18,26 @@ package com.cloud.template; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.event.EventTypes; -import com.cloud.event.UsageEventUtils; -import com.cloud.event.UsageEventVO; -import com.cloud.event.dao.UsageEventDao; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.org.Grouping; -import com.cloud.server.StatsCollector; -import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.TemplateProfile; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.dao.VMTemplateZoneDao; -import 
com.cloud.user.AccountVO; -import com.cloud.user.ResourceLimitService; -import com.cloud.user.dao.AccountDao; -import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.exception.CloudRuntimeException; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; @@ -46,8 +47,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.Templa import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.events.Event; -import org.apache.cloudstack.framework.events.EventBus; -import org.apache.cloudstack.framework.events.EventBusException; +import org.apache.cloudstack.framework.events.EventDistributor; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.secstorage.heuristics.HeuristicType; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; @@ -70,30 +70,30 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; 
-import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ExecutionException; - -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyLong; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.when; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventUtils; +import com.cloud.event.UsageEventVO; +import com.cloud.event.dao.UsageEventDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.org.Grouping; +import com.cloud.server.StatsCollector; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.TemplateProfile; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.user.AccountVO; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.dao.AccountDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; @RunWith(MockitoJUnitRunner.class) public class HypervisorTemplateAdapterTest { @Mock - EventBus _bus; + EventDistributor eventDistributor; List events = new ArrayList<>(); @Mock @@ -168,7 +168,7 @@ public class HypervisorTemplateAdapterTest { closeable.close(); } - public UsageEventUtils setupUsageUtils() throws EventBusException { + public UsageEventUtils setupUsageUtils() { Mockito.when(_configDao.getValue(eq("publish.usage.events"))).thenReturn("true"); Mockito.when(_usageEventDao.persist(Mockito.any(UsageEventVO.class))).then(new Answer() { @Override public Void answer(InvocationOnMock 
invocation) throws Throwable { @@ -180,16 +180,14 @@ public class HypervisorTemplateAdapterTest { Mockito.when(_usageEventDao.listAll()).thenReturn(usageEvents); - doAnswer(new Answer() { - @Override public Void answer(InvocationOnMock invocation) throws Throwable { - Event event = (Event)invocation.getArguments()[0]; - events.add(event); - return null; - } - }).when(_bus).publish(any(Event.class)); + doAnswer((Answer) invocation -> { + Event event = (Event)invocation.getArguments()[0]; + events.add(event); + return null; + }).when(eventDistributor).publish(any(Event.class)); componentContextMocked = Mockito.mockStatic(ComponentContext.class); - when(ComponentContext.getComponent(eq(EventBus.class))).thenReturn(_bus); + when(ComponentContext.getComponent(eq(EventDistributor.class))).thenReturn(eventDistributor); UsageEventUtils utils = new UsageEventUtils(); @@ -257,7 +255,7 @@ public class HypervisorTemplateAdapterTest { } //@Test - public void testEmitDeleteEventUuid() throws InterruptedException, ExecutionException, EventBusException { + public void testEmitDeleteEventUuid() throws InterruptedException, ExecutionException { //All the mocks required for this test to work. ImageStoreEntity store = mock(ImageStoreEntity.class); when(store.getId()).thenReturn(1l); diff --git a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java index d98a4f8f058..9daa19206fa 100644 --- a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java @@ -16,6 +16,40 @@ // under the License. 
package com.cloud.user; +import static org.mockito.ArgumentMatchers.nullable; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd; +import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; +import org.apache.cloudstack.api.response.UserTwoFactorAuthenticationSetupResponse; +import org.apache.cloudstack.auth.UserAuthenticator; +import org.apache.cloudstack.auth.UserAuthenticator.ActionOnFailedAuthentication; +import org.apache.cloudstack.auth.UserTwoFactorAuthenticator; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.webhook.WebhookHelper; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InOrder; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; + import com.cloud.acl.DomainChecker; import com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; import com.cloud.domain.Domain; @@ -28,40 +62,12 @@ import com.cloud.projects.Project; import com.cloud.projects.ProjectAccountVO; import com.cloud.user.Account.State; import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.UserVmManagerImpl; import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.snapshot.VMSnapshotVO; -import 
org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; -import org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd; -import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; -import org.apache.cloudstack.api.response.UserTwoFactorAuthenticationSetupResponse; -import org.apache.cloudstack.auth.UserAuthenticator; -import org.apache.cloudstack.auth.UserAuthenticator.ActionOnFailedAuthentication; -import org.apache.cloudstack.auth.UserTwoFactorAuthenticator; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InOrder; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnitRunner; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.mockito.ArgumentMatchers.nullable; @RunWith(MockitoJUnitRunner.class) public class AccountManagerImplTest extends AccountManagetImplTestBase { @@ -173,6 +179,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.when(_sshKeyPairDao.listKeyPairs(Mockito.anyLong(), Mockito.anyLong())).thenReturn(sshkeyList); Mockito.when(_sshKeyPairDao.remove(Mockito.anyLong())).thenReturn(true); Mockito.when(userDataDao.removeByAccountId(Mockito.anyLong())).thenReturn(222); + Mockito.doNothing().when(accountManagerImpl).deleteWebhooksForAccount(Mockito.anyLong()); Assert.assertTrue(accountManagerImpl.deleteUserAccount(42l)); // assert that this was a clean delete @@ -192,12 +199,46 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { 
Mockito.when(_vmMgr.expunge(Mockito.any(UserVmVO.class))).thenReturn(false); Mockito.lenient().when(_domainMgr.getDomain(Mockito.anyLong())).thenReturn(domain); Mockito.lenient().when(securityChecker.checkAccess(Mockito.any(Account.class), Mockito.any(Domain.class))).thenReturn(true); + Mockito.doNothing().when(accountManagerImpl).deleteWebhooksForAccount(Mockito.anyLong()); Assert.assertTrue(accountManagerImpl.deleteUserAccount(42l)); // assert that this was NOT a clean delete Mockito.verify(_accountDao, Mockito.atLeastOnce()).markForCleanup(Mockito.eq(42l)); } + @Test (expected = InvalidParameterValueException.class) + public void deleteUserAccountTestIfAccountIdIsEqualToCallerIdShouldThrowException() { + try (MockedStatic callContextMocked = Mockito.mockStatic(CallContext.class)) { + CallContext callContextMock = Mockito.mock(CallContext.class); + callContextMocked.when(CallContext::current).thenReturn(callContextMock); + long accountId = 1L; + + Mockito.doReturn(accountVoMock).when(callContextMock).getCallingAccount(); + Mockito.doReturn(accountVoMock).when(_accountDao).findById(Mockito.anyLong()); + Mockito.doReturn(domainVoMock).when(_domainDao).findById(Mockito.anyLong()); + Mockito.doReturn(1L).when(accountVoMock).getId(); + + accountManagerImpl.deleteUserAccount(accountId); + } + } + + @Test + public void deleteUserAccountTestIfAccountIdIsNotEqualToCallerAccountIdShouldNotThrowException() { + try (MockedStatic callContextMocked = Mockito.mockStatic(CallContext.class)) { + CallContext callContextMock = Mockito.mock(CallContext.class); + callContextMocked.when(CallContext::current).thenReturn(callContextMock); + long accountId = 1L; + + Mockito.doReturn(accountVoMock).when(callContextMock).getCallingAccount(); + Mockito.doReturn(accountVoMock).when(_accountDao).findById(Mockito.anyLong()); + Mockito.doReturn(2L).when(accountVoMock).getId(); + Mockito.doReturn(true).when(accountManagerImpl).isDeleteNeeded(Mockito.any(), Mockito.anyLong(), Mockito.any()); + 
Mockito.doReturn(new ArrayList()).when(_projectAccountDao).listAdministratedProjectIds(Mockito.anyLong()); + + accountManagerImpl.deleteUserAccount(accountId); + } + } + @Test (expected = InvalidParameterValueException.class) public void deleteUserTestIfUserIdIsEqualToCallerIdShouldThrowException() { try (MockedStatic callContextMocked = Mockito.mockStatic(CallContext.class)) { @@ -247,6 +288,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.when(userAuthenticator.authenticate("test", "fail", 1L, new HashMap<>())).thenReturn(failureAuthenticationPair); Mockito.lenient().when(userAuthenticator.authenticate("test", null, 1L, new HashMap<>())).thenReturn(successAuthenticationPair); Mockito.lenient().when(userAuthenticator.authenticate("test", "", 1L, new HashMap<>())).thenReturn(successAuthenticationPair); + Mockito.when(userAuthenticator.getName()).thenReturn("test"); //Test for incorrect password. authentication should fail UserAccount userAccount = accountManagerImpl.authenticateUser("test", "fail", 1L, InetAddress.getByName("127.0.0.1"), new HashMap<>()); @@ -284,6 +326,63 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { accountManagerImpl.getKeys(_listkeyscmd); } + @Test(expected = PermissionDeniedException.class) + public void testGetUserKeysCmdDomainAdminRootAdminUser() { + CallContext.register(callingUser, callingAccount); + Mockito.when(_listkeyscmd.getID()).thenReturn(2L); + Mockito.when(accountManagerImpl.getActiveUser(2L)).thenReturn(userVoMock); + Mockito.when(userAccountDaoMock.findById(2L)).thenReturn(userAccountVO); + Mockito.when(userAccountVO.getAccountId()).thenReturn(2L); + Mockito.when(userDetailsDaoMock.listDetailsKeyPairs(Mockito.anyLong())).thenReturn(null); + + // Queried account - admin account + AccountVO adminAccountMock = Mockito.mock(AccountVO.class); + Mockito.when(adminAccountMock.getAccountId()).thenReturn(2L); + 
Mockito.when(_accountDao.findByIdIncludingRemoved(2L)).thenReturn(adminAccountMock); + Mockito.lenient().when(accountService.isRootAdmin(2L)).thenReturn(true); + Mockito.lenient().when(securityChecker.checkAccess(Mockito.any(Account.class), + Mockito.nullable(ControlledEntity.class), Mockito.nullable(AccessType.class), Mockito.anyString())).thenReturn(true); + + // Calling account is domain admin of the ROOT domain + Mockito.lenient().when(callingAccount.getType()).thenReturn(Account.Type.DOMAIN_ADMIN); + Mockito.lenient().when(callingAccount.getDomainId()).thenReturn(Domain.ROOT_DOMAIN); + + Mockito.lenient().when(callingUser.getAccountId()).thenReturn(2L); + Mockito.lenient().when(_accountDao.findById(2L)).thenReturn(callingAccount); + + Mockito.lenient().when(accountService.isDomainAdmin(Mockito.anyLong())).thenReturn(Boolean.TRUE); + Mockito.lenient().when(accountMock.getAccountId()).thenReturn(2L); + + accountManagerImpl.getKeys(_listkeyscmd); + } + + @Test + public void testPreventRootDomainAdminAccessToRootAdminKeysNormalUser() { + User user = Mockito.mock(User.class); + ControlledEntity entity = Mockito.mock(ControlledEntity.class); + Mockito.when(user.getAccountId()).thenReturn(1L); + AccountVO account = Mockito.mock(AccountVO.class); + Mockito.when(account.getType()).thenReturn(Account.Type.NORMAL); + Mockito.when(_accountDao.findById(1L)).thenReturn(account); + accountManagerImpl.preventRootDomainAdminAccessToRootAdminKeys(user, entity); + Mockito.verify(accountManagerImpl, Mockito.never()).isRootAdmin(Mockito.anyLong()); + } + + @Test(expected = PermissionDeniedException.class) + public void testPreventRootDomainAdminAccessToRootAdminKeysRootDomainAdminUser() { + User user = Mockito.mock(User.class); + ControlledEntity entity = Mockito.mock(ControlledEntity.class); + Mockito.when(user.getAccountId()).thenReturn(1L); + AccountVO account = Mockito.mock(AccountVO.class); + Mockito.when(account.getType()).thenReturn(Account.Type.DOMAIN_ADMIN); + 
Mockito.when(account.getDomainId()).thenReturn(Domain.ROOT_DOMAIN); + Mockito.when(_accountDao.findById(1L)).thenReturn(account); + Mockito.when(entity.getAccountId()).thenReturn(1L); + Mockito.lenient().when(securityChecker.checkAccess(Mockito.any(Account.class), + Mockito.nullable(ControlledEntity.class), Mockito.nullable(AccessType.class), Mockito.anyString())).thenReturn(true); + accountManagerImpl.preventRootDomainAdminAccessToRootAdminKeys(user, entity); + } + @Test public void updateUserTestTimeZoneAndEmailNull() { prepareMockAndExecuteUpdateUserTest(0); @@ -306,7 +405,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.doNothing().when(accountManagerImpl).validateAndUpdateFirstNameIfNeeded(UpdateUserCmdMock, userVoMock); Mockito.doNothing().when(accountManagerImpl).validateAndUpdateLastNameIfNeeded(UpdateUserCmdMock, userVoMock); Mockito.doNothing().when(accountManagerImpl).validateAndUpdateUsernameIfNeeded(UpdateUserCmdMock, userVoMock, accountMock); - Mockito.doNothing().when(accountManagerImpl).validateUserPasswordAndUpdateIfNeeded(Mockito.anyString(), Mockito.eq(userVoMock), Mockito.anyString()); + Mockito.doNothing().when(accountManagerImpl).validateUserPasswordAndUpdateIfNeeded(Mockito.anyString(), Mockito.eq(userVoMock), Mockito.anyString(), Mockito.eq(false)); Mockito.doReturn(true).when(userDaoMock).update(Mockito.anyLong(), Mockito.eq(userVoMock)); Mockito.doReturn(Mockito.mock(UserAccountVO.class)).when(userAccountDaoMock).findById(Mockito.anyLong()); @@ -322,7 +421,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { inOrder.verify(accountManagerImpl).validateAndUpdateFirstNameIfNeeded(UpdateUserCmdMock, userVoMock); inOrder.verify(accountManagerImpl).validateAndUpdateLastNameIfNeeded(UpdateUserCmdMock, userVoMock); inOrder.verify(accountManagerImpl).validateAndUpdateUsernameIfNeeded(UpdateUserCmdMock, userVoMock, accountMock); - 
inOrder.verify(accountManagerImpl).validateUserPasswordAndUpdateIfNeeded(UpdateUserCmdMock.getPassword(), userVoMock, UpdateUserCmdMock.getCurrentPassword()); + inOrder.verify(accountManagerImpl).validateUserPasswordAndUpdateIfNeeded(UpdateUserCmdMock.getPassword(), userVoMock, UpdateUserCmdMock.getCurrentPassword(), false); inOrder.verify(userVoMock, Mockito.times(numberOfExpectedCallsForSetEmailAndSetTimeZone)).setEmail(Mockito.anyString()); inOrder.verify(userVoMock, Mockito.times(numberOfExpectedCallsForSetEmailAndSetTimeZone)).setTimezone(Mockito.anyString()); @@ -608,14 +707,14 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { @Test public void valiateUserPasswordAndUpdateIfNeededTestPasswordNull() { - accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(null, userVoMock, null); + accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(null, userVoMock, null, false); Mockito.verify(userVoMock, Mockito.times(0)).setPassword(Mockito.anyString()); } @Test(expected = InvalidParameterValueException.class) public void valiateUserPasswordAndUpdateIfNeededTestBlankPassword() { - accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(" ", userVoMock, null); + accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(" ", userVoMock, null, false); } @Test(expected = InvalidParameterValueException.class) @@ -629,7 +728,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.lenient().doNothing().when(passwordPolicyMock).verifyIfPasswordCompliesWithPasswordPolicies(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong()); - accountManagerImpl.validateUserPasswordAndUpdateIfNeeded("newPassword", userVoMock, " "); + accountManagerImpl.validateUserPasswordAndUpdateIfNeeded("newPassword", userVoMock, " ", false); } @Test(expected = CloudRuntimeException.class) @@ -644,7 +743,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { 
Mockito.lenient().doNothing().when(passwordPolicyMock).verifyIfPasswordCompliesWithPasswordPolicies(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong()); - accountManagerImpl.validateUserPasswordAndUpdateIfNeeded("newPassword", userVoMock, null); + accountManagerImpl.validateUserPasswordAndUpdateIfNeeded("newPassword", userVoMock, null, false); } @Test @@ -663,7 +762,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.lenient().doNothing().when(passwordPolicyMock).verifyIfPasswordCompliesWithPasswordPolicies(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong()); - accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(newPassword, userVoMock, null); + accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(newPassword, userVoMock, null, false); Mockito.verify(accountManagerImpl, Mockito.times(0)).validateCurrentPassword(Mockito.eq(userVoMock), Mockito.anyString()); Mockito.verify(userVoMock, Mockito.times(1)).setPassword(expectedUserPasswordAfterEncoded); @@ -685,7 +784,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.lenient().doNothing().when(passwordPolicyMock).verifyIfPasswordCompliesWithPasswordPolicies(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong()); - accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(newPassword, userVoMock, null); + accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(newPassword, userVoMock, null, false); Mockito.verify(accountManagerImpl, Mockito.times(0)).validateCurrentPassword(Mockito.eq(userVoMock), Mockito.anyString()); Mockito.verify(userVoMock, Mockito.times(1)).setPassword(expectedUserPasswordAfterEncoded); @@ -708,7 +807,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.lenient().doNothing().when(passwordPolicyMock).verifyIfPasswordCompliesWithPasswordPolicies(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong()); - 
accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(newPassword, userVoMock, currentPassword); + accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(newPassword, userVoMock, currentPassword, false); Mockito.verify(accountManagerImpl, Mockito.times(1)).validateCurrentPassword(userVoMock, currentPassword); Mockito.verify(userVoMock, Mockito.times(1)).setPassword(expectedUserPasswordAfterEncoded); @@ -727,7 +826,7 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Mockito.doThrow(new InvalidParameterValueException("")).when(passwordPolicyMock).verifyIfPasswordCompliesWithPasswordPolicies(Mockito.anyString(), Mockito.anyString(), Mockito.anyLong()); - accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(newPassword, userVoMock, currentPassword); + accountManagerImpl.validateUserPasswordAndUpdateIfNeeded(newPassword, userVoMock, currentPassword, false); } private String configureUserMockAuthenticators(String newPassword) { @@ -1031,4 +1130,24 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { Assert.assertEquals(userAccountVOList.size(), userAccounts.size()); Assert.assertEquals(userAccountVOList.get(0), userAccounts.get(0)); } + + @Test + public void testDeleteWebhooksForAccount() { + try (MockedStatic mockedComponentContext = Mockito.mockStatic(ComponentContext.class)) { + WebhookHelper webhookHelper = Mockito.mock(WebhookHelper.class); + Mockito.doNothing().when(webhookHelper).deleteWebhooksForAccount(Mockito.anyLong()); + mockedComponentContext.when(() -> ComponentContext.getDelegateComponentOfType(WebhookHelper.class)) + .thenReturn(webhookHelper); + accountManagerImpl.deleteWebhooksForAccount(1L); + } + } + + @Test + public void testDeleteWebhooksForAccountNoBean() { + try (MockedStatic mockedComponentContext = Mockito.mockStatic(ComponentContext.class)) { + mockedComponentContext.when(() -> ComponentContext.getDelegateComponentOfType(WebhookHelper.class)) + 
.thenThrow(NoSuchBeanDefinitionException.class); + accountManagerImpl.deleteWebhooksForAccount(1L); + } + } } diff --git a/server/src/test/java/com/cloud/user/AccountManagetImplTestBase.java b/server/src/test/java/com/cloud/user/AccountManagetImplTestBase.java index 7f9fa488471..e97fddc0262 100644 --- a/server/src/test/java/com/cloud/user/AccountManagetImplTestBase.java +++ b/server/src/test/java/com/cloud/user/AccountManagetImplTestBase.java @@ -65,6 +65,7 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationSe import org.apache.cloudstack.engine.service.api.OrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.cloudstack.region.gslb.GlobalLoadBalancerRuleDao; import org.apache.cloudstack.resourcedetail.dao.UserDetailsDao; import org.junit.After; @@ -203,6 +204,8 @@ public class AccountManagetImplTestBase { UsageEventDao _usageEventDao; @Mock AccountService _accountService; + @Mock + RoutedIpv4Manager routedIpv4Manager; @Before public void setup() { diff --git a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java index 829f0c9cb20..39155986941 100644 --- a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java @@ -49,6 +49,7 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; +import org.apache.cloudstack.network.RoutedIpv4Manager; import org.apache.cloudstack.region.RegionManager; import org.junit.Assert; import org.junit.Before; @@ -108,6 +109,8 @@ public class DomainManagerImplTest { DomainDetailsDao 
_domainDetailsDao; @Mock AnnotationDao annotationDao; + @Mock + RoutedIpv4Manager routedIpv4Manager; @Spy @InjectMocks diff --git a/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java b/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java index 334e1f33481..4cf7413f3f3 100644 --- a/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java +++ b/server/src/test/java/com/cloud/user/MockAccountManagerImpl.java @@ -464,6 +464,10 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco return null; } + @Override + public void checkApiAccess(Account account, String command) throws PermissionDeniedException { + + } @Override public void checkAccess(User user, ControlledEntity entity) throws PermissionDeniedException { @@ -483,4 +487,8 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco public List getApiNameList() { return null; } + + @Override + public void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO user, String currentPassword, boolean skipCurrentPassValidation) { + } } diff --git a/server/src/test/java/com/cloud/user/MockUsageEventDao.java b/server/src/test/java/com/cloud/user/MockUsageEventDao.java index 52e1b1a02b4..fb8c193780f 100644 --- a/server/src/test/java/com/cloud/user/MockUsageEventDao.java +++ b/server/src/test/java/com/cloud/user/MockUsageEventDao.java @@ -210,6 +210,11 @@ public class MockUsageEventDao implements UsageEventDao{ return 0; } + @Override + public int expunge(SearchCriteria sc, long limit) { + return 0; + } + @Override public void expunge() { @@ -332,4 +337,19 @@ public class MockUsageEventDao implements UsageEventDao{ public Pair, Integer> searchAndCount(SearchCriteria sc, Filter filter, boolean includeRemoved) { return null; } + + @Override + public int expunge(SearchCriteria sc, Filter filter) { + return 0; + } + + @Override + public int batchExpunge(SearchCriteria sc, Long batchSize) { + return 0; + } + + @Override + public int 
expungeList(List longs) { + return 0; + } } diff --git a/server/src/test/java/com/cloud/user/PasswordPolicyImplTest.java b/server/src/test/java/com/cloud/user/PasswordPolicyImplTest.java index dc3b7624fc4..561627a361a 100644 --- a/server/src/test/java/com/cloud/user/PasswordPolicyImplTest.java +++ b/server/src/test/java/com/cloud/user/PasswordPolicyImplTest.java @@ -160,4 +160,22 @@ public class PasswordPolicyImplTest { passwordPolicySpy.validateIfPasswordMatchesRegex("abcd123", "user", null); } + @Test + public void validateCombinationOfPolicies() { + Mockito.doReturn(2).when(passwordPolicySpy).getPasswordPolicyMinimumSpecialCharacters(null); + Mockito.doReturn(1).when(passwordPolicySpy).getPasswordPolicyMinimumUpperCaseLetters(null); + Mockito.doReturn(1).when(passwordPolicySpy).getPasswordPolicyMinimumLowerCaseLetters(null); + Mockito.doReturn(1).when(passwordPolicySpy).getPasswordPolicyMinimumDigits(null); + Mockito.doReturn(8).when(passwordPolicySpy).getPasswordPolicyMinimumLength(null); + Mockito.doReturn(false).when(passwordPolicySpy).getPasswordPolicyAllowPasswordToContainUsername(null); + + String password = "Ab1!@#cd"; + passwordPolicySpy.validateIfPasswordContainsTheMinimumNumberOfSpecialCharacters(2, password, null); + passwordPolicySpy.validateIfPasswordContainsTheMinimumNumberOfUpperCaseLetters(1, password, null); + passwordPolicySpy.validateIfPasswordContainsTheMinimumNumberOfLowerCaseLetters(1, password, null); + passwordPolicySpy.validateIfPasswordContainsTheMinimumNumberOfDigits(1, password, null); + passwordPolicySpy.validateIfPasswordContainsTheMinimumLength(password, "user", null); + passwordPolicySpy.validateIfPasswordContainsTheUsername(password, "user", null); + } + } diff --git a/server/src/test/java/com/cloud/vm/FirstFitPlannerTest.java b/server/src/test/java/com/cloud/vm/FirstFitPlannerTest.java index 0852c20010b..981649758cb 100644 --- a/server/src/test/java/com/cloud/vm/FirstFitPlannerTest.java +++ 
b/server/src/test/java/com/cloud/vm/FirstFitPlannerTest.java @@ -16,6 +16,49 @@ // under the License. package com.cloud.vm; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.ConfigDepot; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; +import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import 
org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.dao.CapacityDao; @@ -54,48 +97,6 @@ import com.cloud.utils.component.ComponentContext; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.ConfigDepot; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.ScopedConfigStorage; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao; -import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; -import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; -import org.apache.cloudstack.framework.config.impl.ConfigurationVO; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.test.utils.SpringUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentMatchers; -import org.mockito.Mockito; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.ComponentScan; -import org.springframework.context.annotation.ComponentScan.Filter; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.FilterType; -import org.springframework.core.type.classreading.MetadataReader; -import org.springframework.core.type.classreading.MetadataReaderFactory; -import org.springframework.core.type.filter.TypeFilter; -import org.springframework.test.context.ContextConfiguration; 
-import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; -import org.springframework.test.context.support.AnnotationConfigContextLoader; - -import javax.inject.Inject; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(loader = AnnotationConfigContextLoader.class) @@ -243,9 +244,8 @@ public class FirstFitPlannerTest { } private List initializeForClusterThresholdDisabled() { - ConfigurationVO config = mock(ConfigurationVO.class); - when(config.getValue()).thenReturn(String.valueOf(false)); - when(configDao.findById(DeploymentClusterPlanner.ClusterThresholdEnabled.key())).thenReturn(config); + when(configDepot.getConfigStringValue(DeploymentClusterPlanner.ClusterThresholdEnabled.key(), + ConfigKey.Scope.Global, null)).thenReturn(Boolean.FALSE.toString()); List clustersCrossingThreshold = new ArrayList(); clustersCrossingThreshold.add(3L); diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index 323a1ed9416..8316c57d67d 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -487,7 +487,7 @@ public class UserVmManagerImplTest { Mockito.verify(userVmManagerImpl).getSecurityGroupIdList(updateVmCommand); Mockito.verify(userVmManagerImpl).updateVirtualMachine(nullable(Long.class), nullable(String.class), nullable(String.class), nullable(Boolean.class), - nullable(Boolean.class), nullable(Long.class), + nullable(Boolean.class), nullable(Boolean.class), nullable(Long.class), nullable(String.class), nullable(Long.class), 
nullable(String.class), nullable(Boolean.class), nullable(HTTPMethod.class), nullable(String.class), nullable(String.class), nullable(String.class), nullable(List.class), nullable(Map.class)); @@ -498,7 +498,7 @@ public class UserVmManagerImplTest { Mockito.doNothing().when(userVmManagerImpl).validateInputsAndPermissionForUpdateVirtualMachineCommand(updateVmCommand); Mockito.doReturn(new ArrayList()).when(userVmManagerImpl).getSecurityGroupIdList(updateVmCommand); Mockito.lenient().doReturn(Mockito.mock(UserVm.class)).when(userVmManagerImpl).updateVirtualMachine(Mockito.anyLong(), Mockito.anyString(), Mockito.anyString(), Mockito.anyBoolean(), - Mockito.anyBoolean(), Mockito.anyLong(), + Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyBoolean(), Mockito.any(HTTPMethod.class), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyList(), Mockito.anyMap()); } @@ -691,34 +691,6 @@ public class UserVmManagerImplTest { prepareAndRunResizeVolumeTest(2L, 10L, 20L, largerDisdkOffering, smallerDisdkOffering); } - @Test - public void validateDiskOfferingCheckForEncryption1Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, true); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, true); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - - @Test - public void validateDiskOfferingCheckForEncryption2Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, false); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, false); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - - @Test (expected = InvalidParameterValueException.class) - public void validateDiskOfferingCheckForEncryptionFail1Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, false); - 
ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, true); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - - @Test (expected = InvalidParameterValueException.class) - public void validateDiskOfferingCheckForEncryptionFail2Test() { - ServiceOfferingVO currentOffering = prepareOfferingsForEncryptionValidation(1L, true); - ServiceOfferingVO newOffering = prepareOfferingsForEncryptionValidation(2L, false); - userVmManagerImpl.validateDiskOfferingChecks(currentOffering, newOffering); - } - private void prepareAndRunResizeVolumeTest(Long expectedOfferingId, long expectedMinIops, long expectedMaxIops, DiskOfferingVO currentRootDiskOffering, DiskOfferingVO newRootDiskOffering) { long rootVolumeId = 1l; VolumeVO rootVolumeOfVm = Mockito.mock(VolumeVO.class); @@ -742,20 +714,6 @@ public class UserVmManagerImplTest { return newRootDiskOffering; } - private ServiceOfferingVO prepareOfferingsForEncryptionValidation(long diskOfferingId, boolean encryption) { - ServiceOfferingVO svcOffering = Mockito.mock(ServiceOfferingVO.class); - DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); - - Mockito.when(svcOffering.getDiskOfferingId()).thenReturn(diskOfferingId); - Mockito.when(diskOffering.getEncrypt()).thenReturn(encryption); - - // Be aware - Multiple calls with the same disk offering ID could conflict - Mockito.when(diskOfferingDao.findByIdIncludingRemoved(diskOfferingId)).thenReturn(diskOffering); - Mockito.when(diskOfferingDao.findById(diskOfferingId)).thenReturn(diskOffering); - - return svcOffering; - } - @Test (expected = CloudRuntimeException.class) public void testUserDataDenyOverride() { Long userDataId = 1L; @@ -1566,6 +1524,46 @@ public class UserVmManagerImplTest { } @Test + public void testValidateStrictHostTagCheckPass() { + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + VMTemplateVO template = Mockito.mock(VMTemplateVO.class); + + VMInstanceVO vm = 
Mockito.mock(VMInstanceVO.class); + HostVO destinationHostVO = Mockito.mock(HostVO.class); + + Mockito.when(_serviceOfferingDao.findByIdIncludingRemoved(1L)).thenReturn(serviceOffering); + Mockito.when(templateDao.findByIdIncludingRemoved(2L)).thenReturn(template); + + Mockito.when(vm.getServiceOfferingId()).thenReturn(1L); + Mockito.when(vm.getTemplateId()).thenReturn(2L); + + Mockito.when(destinationHostVO.checkHostServiceOfferingAndTemplateTags(Mockito.any(ServiceOffering.class), Mockito.any(VirtualMachineTemplate.class), Mockito.anySet())).thenReturn(true); + + userVmManagerImpl.validateStrictHostTagCheck(vm, destinationHostVO); + + Mockito.verify( + destinationHostVO, Mockito.times(1) + ).checkHostServiceOfferingAndTemplateTags(Mockito.any(ServiceOffering.class), Mockito.any(VirtualMachineTemplate.class), Mockito.anySet()); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidateStrictHostTagCheckFail() { + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + VMTemplateVO template = Mockito.mock(VMTemplateVO.class); + + VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); + HostVO destinationHostVO = Mockito.mock(HostVO.class); + + Mockito.when(_serviceOfferingDao.findByIdIncludingRemoved(1L)).thenReturn(serviceOffering); + Mockito.when(templateDao.findByIdIncludingRemoved(2L)).thenReturn(template); + + Mockito.when(vm.getServiceOfferingId()).thenReturn(1L); + Mockito.when(vm.getTemplateId()).thenReturn(2L); + + Mockito.when(destinationHostVO.checkHostServiceOfferingAndTemplateTags(Mockito.any(ServiceOffering.class), Mockito.any(VirtualMachineTemplate.class), Mockito.anySet())).thenReturn(false); + userVmManagerImpl.validateStrictHostTagCheck(vm, destinationHostVO); + } + public void testGetRootVolumeSizeForVmRestore() { VMTemplateVO template = Mockito.mock(VMTemplateVO.class); Mockito.when(template.getSize()).thenReturn(10L * GiB_TO_BYTES); @@ -1597,4 +1595,40 @@ public class UserVmManagerImplTest { Long 
actualSize = userVmManagerImpl.getRootVolumeSizeForVmRestore(null, template, userVm, diskOffering, details, false); Assert.assertEquals(20 * GiB_TO_BYTES, actualSize.longValue()); } + + @Test + public void checkExpungeVMPermissionTestAccountIsNotAdminConfigFalseThrowsPermissionDeniedException () { + Mockito.doReturn(false).when(accountManager).isAdmin(Mockito.anyLong()); + Mockito.doReturn(false).when(userVmManagerImpl).getConfigAllowUserExpungeRecoverVm(Mockito.anyLong()); + + Assert.assertThrows(PermissionDeniedException.class, () -> userVmManagerImpl.checkExpungeVmPermission(accountMock)); + } + @Test + public void checkExpungeVmPermissionTestAccountIsNotAdminConfigTrueNoApiAccessThrowsPermissionDeniedException () { + Mockito.doReturn(false).when(accountManager).isAdmin(Mockito.anyLong()); + Mockito.doReturn(true).when(userVmManagerImpl).getConfigAllowUserExpungeRecoverVm(Mockito.anyLong()); + Mockito.doThrow(PermissionDeniedException.class).when(accountManager).checkApiAccess(accountMock, "expungeVirtualMachine"); + + Assert.assertThrows(PermissionDeniedException.class, () -> userVmManagerImpl.checkExpungeVmPermission(accountMock)); + } + @Test + public void checkExpungeVmPermissionTestAccountIsNotAdminConfigTrueHasApiAccessReturnNothing () { + Mockito.doReturn(false).when(accountManager).isAdmin(Mockito.anyLong()); + Mockito.doReturn(true).when(userVmManagerImpl).getConfigAllowUserExpungeRecoverVm(Mockito.anyLong()); + + userVmManagerImpl.checkExpungeVmPermission(accountMock); + } + @Test + public void checkExpungeVmPermissionTestAccountIsAdminNoApiAccessThrowsPermissionDeniedException () { + Mockito.doReturn(true).when(accountManager).isAdmin(Mockito.anyLong()); + Mockito.doThrow(PermissionDeniedException.class).when(accountManager).checkApiAccess(accountMock, "expungeVirtualMachine"); + + Assert.assertThrows(PermissionDeniedException.class, () -> userVmManagerImpl.checkExpungeVmPermission(accountMock)); + } + @Test + public void 
checkExpungeVmPermissionTestAccountIsAdminHasApiAccessReturnNothing () { + Mockito.doReturn(true).when(accountManager).isAdmin(Mockito.anyLong()); + + userVmManagerImpl.checkExpungeVmPermission(accountMock); + } } diff --git a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java index 2a1f4fffbf8..8f05b716725 100644 --- a/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -547,7 +547,8 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu Integer networkRate, Map> serviceProviderMap, boolean isDefault, GuestType type, boolean systemOnly, Long serviceOfferingId, boolean conserveMode, Map> serviceCapabilityMap, boolean specifyIpRanges, boolean isPersistent, Map details, boolean egressDefaultPolicy, Integer maxconn, boolean enableKeepAlive, Boolean forVpc, - Boolean forTungsten, boolean forNsx, String mode, List domainIds, List zoneIds, boolean enableOffering, NetUtils.InternetProtocol internetProtocol) { + Boolean forTungsten, boolean forNsx, NetworkOffering.NetworkMode networkMode, List domainIds, List zoneIds, boolean enableOffering, NetUtils.InternetProtocol internetProtocol, + NetworkOffering.RoutingMode routingMode, boolean specifyAsNumber) { // TODO Auto-generated method stub return null; } diff --git a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java index 4eaf31e6bb8..7533767c00b 100644 --- a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java @@ -25,6 +25,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import com.cloud.dc.DataCenter; +import com.cloud.hypervisor.Hypervisor; import com.cloud.network.PublicIpQuarantine; import com.cloud.network.VirtualRouterProvider; 
import com.cloud.utils.fsm.NoTransitionException; @@ -222,6 +223,13 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches return null; } + @Override + public Network createGuestNetwork(long networkOfferingId, String name, String displayText, Account owner, + PhysicalNetwork physicalNetwork, long zoneId, ACLType aclType) throws InsufficientCapacityException, + ConcurrentOperationException, ResourceAllocationException { + return null; + } + /* (non-Javadoc) * @see com.cloud.network.NetworkService#searchForNetworks(com.cloud.api.commands.ListNetworksCmd) */ @@ -633,6 +641,11 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches return null; } + @Override + public List getNicProfiles(Long vmId, Hypervisor.HypervisorType hypervisorType) { + return List.of(); + } + @Override public Map getSystemVMAccessDetails(VirtualMachine vm) { return null; @@ -683,7 +696,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches public Network createGuestNetwork(long networkOfferingId, String name, String displayText, String gateway, String cidr, String vlanId, boolean bypassVlanOverlapCheck, String networkDomain, Account owner, Long domainId, PhysicalNetwork physicalNetwork, long zoneId, ACLType aclType, Boolean subdomainAccess, Long vpcId, String gatewayv6, String cidrv6, Boolean displayNetworkEnabled, String isolatedPvlan, Network.PVlanType isolatedPvlanType, String externalId, String routerIp, String routerIpv6, - String ip4Dns1, String ip4Dns2, String ip6Dns1, String ip6Dns2, Pair vrIfaceMTUs) throws ConcurrentOperationException, ResourceAllocationException { + String ip4Dns1, String ip4Dns2, String ip6Dns1, String ip6Dns2, Pair vrIfaceMTUs, Integer networkCidrSize) throws ConcurrentOperationException, ResourceAllocationException { // TODO Auto-generated method stub return null; } @@ -1113,4 +1126,8 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches public 
boolean handleCksIsoOnNetworkVirtualRouter(Long virtualRouterId, boolean mount) { return false; } + + @Override + public void expungeLbVmRefs(List vmIds, Long batchSize) { + } } diff --git a/server/src/test/java/com/cloud/vpc/MockNetworkModelImpl.java b/server/src/test/java/com/cloud/vpc/MockNetworkModelImpl.java index ebee9fec02d..3b41a56dc0f 100644 --- a/server/src/test/java/com/cloud/vpc/MockNetworkModelImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockNetworkModelImpl.java @@ -248,7 +248,7 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { * @see com.cloud.network.NetworkModel#getNetworkWithSGWithFreeIPs(java.lang.Long) */ @Override - public NetworkVO getNetworkWithSGWithFreeIPs(Long zoneId) { + public NetworkVO getNetworkWithSGWithFreeIPs(Account account, Long zoneId) { // TODO Auto-generated method stub return null; } @@ -352,6 +352,11 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { return false; } + @Override + public boolean isAnyServiceSupportedInNetwork(long networkId, Provider provider, Service... 
services) { + return false; + } + /* (non-Javadoc) * @see com.cloud.network.NetworkModel#isProviderEnabledInPhysicalNetwork(long, java.lang.String) */ @@ -857,6 +862,11 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { // TODO Auto-generated method stub } + @Override + public void checkIp6CidrSizeEqualTo64(String ip6Cidr) throws InvalidParameterValueException { + // TODO Auto-generated method stub + } + @Override public void checkRequestedIpAddresses(long networkId, IpAddresses ips) throws InvalidParameterValueException { // TODO Auto-generated method stub @@ -949,4 +959,15 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { @Override public void verifyIp6DnsPair(String ip4Dns1, String ip4Dns2) {} + + @Override + public boolean isSecurityGroupSupportedForZone(Long zoneId) { + return false; + } + + @Override + public boolean checkSecurityGroupSupportForNetwork(Account account, DataCenter zone, + List networkIds, List securityGroupsIds) { + return false; + } } diff --git a/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java b/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java index 3949fa8e6ca..d525c6c51d7 100644 --- a/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java +++ b/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java @@ -17,9 +17,20 @@ package com.cloud.vpc; +import java.util.List; +import java.util.Map; + +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd; +import org.apache.cloudstack.api.command.admin.router.UpgradeRouterTemplateCmd; +import org.springframework.stereotype.Component; + +import com.cloud.deploy.DeploymentPlanner; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; +import 
com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; import com.cloud.network.RemoteAccessVpn; @@ -34,14 +45,8 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.Nic; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; -import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd; -import org.apache.cloudstack.api.command.admin.router.UpgradeRouterTemplateCmd; -import org.springframework.stereotype.Component; - -import javax.naming.ConfigurationException; -import java.util.List; -import java.util.Map; @Component public class MockVpcVirtualNetworkApplianceManager extends ManagerBase implements VpcVirtualNetworkApplianceManager, VpcVirtualNetworkApplianceService { @@ -141,6 +146,13 @@ public class MockVpcVirtualNetworkApplianceManager extends ManagerBase implement return null; } + @Override + public void startRouterForHA(VirtualMachine vm, Map params, + DeploymentPlanner planner) throws InsufficientCapacityException, ResourceUnavailableException, + ConcurrentOperationException, OperationTimedoutException { + + } + /* (non-Javadoc) * @see com.cloud.network.VirtualNetworkApplianceService#destroyRouter(long, com.cloud.user.Account, java.lang.Long) */ @@ -204,6 +216,20 @@ public class MockVpcVirtualNetworkApplianceManager extends ManagerBase implement return false; } + @Override + public boolean stopKeepAlivedOnRouter(VirtualRouter router, + Network network) throws ConcurrentOperationException, ResourceUnavailableException { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean startKeepAlivedOnRouter(VirtualRouter router, + Network network) throws ConcurrentOperationException, ResourceUnavailableException { + // TODO Auto-generated method stub + return false; + } + /* (non-Javadoc) * @see 
com.cloud.network.router.VpcVirtualNetworkApplianceManager#destroyPrivateGateway(com.cloud.network.vpc.PrivateGateway, com.cloud.network.router.VirtualRouter) */ diff --git a/server/src/test/java/com/cloud/vpc/dao/MockNetworkDaoImpl.java b/server/src/test/java/com/cloud/vpc/dao/MockNetworkDaoImpl.java index fe0b3ea23c1..490f2ccba46 100644 --- a/server/src/test/java/com/cloud/vpc/dao/MockNetworkDaoImpl.java +++ b/server/src/test/java/com/cloud/vpc/dao/MockNetworkDaoImpl.java @@ -264,4 +264,5 @@ public class MockNetworkDaoImpl extends GenericDaoBase implemen public List getAllPersistentNetworksFromZone(long dataCenterId) { return null; } + } diff --git a/server/src/test/java/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java b/server/src/test/java/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java index d5192644e86..6746c5ecbf8 100644 --- a/server/src/test/java/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java +++ b/server/src/test/java/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java @@ -39,6 +39,11 @@ public class MockNetworkServiceMapDaoImpl extends GenericDaoBase public boolean isIpv6Supported(long offeringId) { return false; } + + @Override + public boolean isRoutedVpc(long offeringId) { + return false; + } } diff --git a/server/src/test/java/org/apache/cloudstack/acl/RoleManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/acl/RoleManagerImplTest.java index e596601325e..5d9ee268d8b 100644 --- a/server/src/test/java/org/apache/cloudstack/acl/RoleManagerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/acl/RoleManagerImplTest.java @@ -214,7 +214,7 @@ public class RoleManagerImplTest { String roleName = "roleName"; List roles = new ArrayList<>(); Pair, Integer> toBeReturned = new Pair(roles, 0); - Mockito.doReturn(toBeReturned).when(roleDaoMock).findAllByName(roleName, null, null, null, false); + Mockito.doReturn(toBeReturned).when(roleDaoMock).findAllByName(roleName, null, null, null, null, false); 
roleManagerImpl.findRolesByName(roleName); Mockito.verify(roleManagerImpl).removeRolesIfNeeded(roles); @@ -345,7 +345,7 @@ public class RoleManagerImplTest { List roles = new ArrayList<>(); roles.add(Mockito.mock(Role.class)); Pair, Integer> toBeReturned = new Pair(roles, 1); - Mockito.doReturn(toBeReturned).when(roleDaoMock).findAllByRoleType(RoleType.Admin, null, null, true); + Mockito.doReturn(toBeReturned).when(roleDaoMock).findAllByRoleType(RoleType.Admin, null, null, null, true); List returnedRoles = roleManagerImpl.findRolesByType(RoleType.Admin); Assert.assertEquals(1, returnedRoles.size()); @@ -360,7 +360,7 @@ public class RoleManagerImplTest { List roles = new ArrayList<>(); roles.add(Mockito.mock(Role.class)); Pair, Integer> toBeReturned = new Pair(roles, 1); - Mockito.doReturn(toBeReturned).when(roleDaoMock).findAllByRoleType(RoleType.User, null, null, true); + Mockito.doReturn(toBeReturned).when(roleDaoMock).findAllByRoleType(RoleType.User, null, null, null, true); List returnedRoles = roleManagerImpl.findRolesByType(RoleType.User); Assert.assertEquals(1, returnedRoles.size()); diff --git a/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java b/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java index 7726af09361..3bf1fb97e4d 100644 --- a/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java +++ b/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java @@ -47,6 +47,7 @@ import java.util.Collections; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; @@ -158,65 +159,88 @@ public class BackupManagerTest { @Test public void restoreBackedUpVolumeTestHostIpAndDatastoreUuid() { BackupVO backupVO = new BackupVO(); + VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); String volumeUuid = "5f4ed903-ac23-4f8a-b595-69c73c40593f"; 
+ String vmName = "i-2-3-VM"; + VirtualMachine.State vmState = VirtualMachine.State.Running; + Mockito.when(vm.getName()).thenReturn(vmName); + Mockito.when(vm.getState()).thenReturn(vmState); + Pair vmNameAndState = new Pair<>("i-2-3-VM", VirtualMachine.State.Running); Mockito.when(backupProvider.restoreBackedUpVolume(Mockito.any(), Mockito.eq(volumeUuid), - Mockito.eq("127.0.0.1"), Mockito.eq("e9804933-8609-4de3-bccc-6278072a496c"))).thenReturn(new Pair(Boolean.TRUE, "Success")); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues); + Mockito.eq("127.0.0.1"), Mockito.eq("e9804933-8609-4de3-bccc-6278072a496c"), Mockito.eq(vmNameAndState))).thenReturn(new Pair(Boolean.TRUE, "Success")); + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success", restoreBackedUpVolume.second()); Mockito.verify(backupProvider, times(1)).restoreBackedUpVolume(Mockito.any(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyString()); + Mockito.anyString(), Mockito.anyString(), any(Pair.class)); } @Test public void restoreBackedUpVolumeTestHostIpAndDatastoreName() { BackupVO backupVO = new BackupVO(); + VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); String volumeUuid = "5f4ed903-ac23-4f8a-b595-69c73c40593f"; - + String vmName = "i-2-3-VM"; + VirtualMachine.State vmState = VirtualMachine.State.Running; + Mockito.when(vm.getName()).thenReturn(vmName); + Mockito.when(vm.getState()).thenReturn(vmState); + Pair vmNameAndState = new Pair<>("i-2-3-VM", VirtualMachine.State.Running); Mockito.when(backupProvider.restoreBackedUpVolume(Mockito.any(), Mockito.eq(volumeUuid), - Mockito.eq("127.0.0.1"), Mockito.eq("datastore-name"))).thenReturn(new Pair(Boolean.TRUE, "Success2")); - Pair restoreBackedUpVolume = 
backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues); + Mockito.eq("127.0.0.1"), Mockito.eq("datastore-name"), Mockito.eq(vmNameAndState))).thenReturn(new Pair(Boolean.TRUE, "Success2")); + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success2", restoreBackedUpVolume.second()); Mockito.verify(backupProvider, times(2)).restoreBackedUpVolume(Mockito.any(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyString()); + Mockito.anyString(), Mockito.anyString(), any(Pair.class)); } @Test public void restoreBackedUpVolumeTestHostNameAndDatastoreUuid() { BackupVO backupVO = new BackupVO(); + VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); String volumeUuid = "5f4ed903-ac23-4f8a-b595-69c73c40593f"; + String vmName = "i-2-3-VM"; + VirtualMachine.State vmState = VirtualMachine.State.Running; + Mockito.when(vm.getName()).thenReturn(vmName); + Mockito.when(vm.getState()).thenReturn(vmState); + Pair vmNameAndState = new Pair<>("i-2-3-VM", VirtualMachine.State.Running); Mockito.when(backupProvider.restoreBackedUpVolume(Mockito.any(), Mockito.eq(volumeUuid), - Mockito.eq("hostname"), Mockito.eq("e9804933-8609-4de3-bccc-6278072a496c"))).thenReturn(new Pair(Boolean.TRUE, "Success3")); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues); + Mockito.eq("hostname"), Mockito.eq("e9804933-8609-4de3-bccc-6278072a496c"), Mockito.eq(vmNameAndState) )).thenReturn(new Pair(Boolean.TRUE, "Success3")); + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success3", 
restoreBackedUpVolume.second()); Mockito.verify(backupProvider, times(3)).restoreBackedUpVolume(Mockito.any(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyString()); + Mockito.anyString(), Mockito.anyString(), any(Pair.class)); } @Test public void restoreBackedUpVolumeTestHostAndDatastoreName() { BackupVO backupVO = new BackupVO(); + VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); String volumeUuid = "5f4ed903-ac23-4f8a-b595-69c73c40593f"; + String vmName = "i-2-3-VM"; + VirtualMachine.State vmState = VirtualMachine.State.Running; + Mockito.when(vm.getName()).thenReturn(vmName); + Mockito.when(vm.getState()).thenReturn(vmState); + Pair vmNameAndState = new Pair<>("i-2-3-VM", VirtualMachine.State.Running); Mockito.when(backupProvider.restoreBackedUpVolume(Mockito.any(), Mockito.eq(volumeUuid), - Mockito.eq("hostname"), Mockito.eq("datastore-name"))).thenReturn(new Pair(Boolean.TRUE, "Success4")); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues); + Mockito.eq("hostname"), Mockito.eq("datastore-name"), Mockito.eq(vmNameAndState))).thenReturn(new Pair(Boolean.TRUE, "Success4")); + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeUuid, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success4", restoreBackedUpVolume.second()); Mockito.verify(backupProvider, times(4)).restoreBackedUpVolume(Mockito.any(), Mockito.anyString(), - Mockito.anyString(), Mockito.anyString()); + Mockito.anyString(), Mockito.anyString(), any(Pair.class)); } @Test diff --git a/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java index ceddf6e1c48..cca6d673dfe 100644 --- 
a/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java @@ -73,6 +73,7 @@ import java.util.Map; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; @RunWith(MockitoJUnitRunner.class) public class ClusterDrsServiceImplTest { @@ -353,6 +354,7 @@ public class ClusterDrsServiceImplTest { Mockito.when(cluster.getId()).thenReturn(1L); HostVO destHost = Mockito.mock(HostVO.class); + Mockito.when(destHost.getClusterId()).thenReturn(1L); HostVO host = Mockito.mock(HostVO.class); Mockito.when(host.getId()).thenReturn(2L); @@ -386,13 +388,9 @@ public class ClusterDrsServiceImplTest { } Mockito.when(managementServer.listHostsForMigrationOfVM(vm1, 0L, 500L, null, vmList)).thenReturn( - new Ternary, Integer>, List, Map>( - new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, - false))); + new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); Mockito.when(managementServer.listHostsForMigrationOfVM(vm2, 0L, 500L, null, vmList)).thenReturn( - new Ternary, Integer>, List, Map>( - new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, - false))); + new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); Mockito.when(balancedAlgorithm.getMetrics(cluster.getId(), vm1, serviceOffering, destHost, new HashMap<>(), new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 0.5, 1.5)); @@ -406,6 +404,56 @@ public class ClusterDrsServiceImplTest { assertEquals(vm1, bestMigration.first()); } + @Test + public void testGetBestMigrationDifferentCluster() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + + HostVO destHost = Mockito.mock(HostVO.class); + Mockito.when(destHost.getClusterId()).thenReturn(2L); + + 
HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getId()).thenReturn(2L); + + VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm1.getId()).thenReturn(1L); + Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running); + Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap()); + + VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm2.getId()).thenReturn(2L); + Mockito.when(vm2.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm2.getState()).thenReturn(VirtualMachine.State.Running); + Mockito.when(vm2.getDetails()).thenReturn(Collections.emptyMap()); + + List vmList = new ArrayList<>(); + vmList.add(vm1); + vmList.add(vm2); + + Map> hostVmMap = new HashMap<>(); + hostVmMap.put(host.getId(), new ArrayList<>()); + hostVmMap.get(host.getId()).add(vm1); + hostVmMap.get(host.getId()).add(vm2); + + Map vmIdServiceOfferingMap = new HashMap<>(); + + ServiceOffering serviceOffering = Mockito.mock(ServiceOffering.class); + for (VirtualMachine vm : vmList) { + vmIdServiceOfferingMap.put(vm.getId(), serviceOffering); + } + + Mockito.when(managementServer.listHostsForMigrationOfVM(vm1, 0L, 500L, null, vmList)).thenReturn( + new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); + Mockito.when(managementServer.listHostsForMigrationOfVM(vm2, 0L, 500L, null, vmList)).thenReturn( + new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); + Pair bestMigration = clusterDrsService.getBestMigration(cluster, balancedAlgorithm, + vmList, vmIdServiceOfferingMap, new HashMap<>(), new HashMap<>()); + + assertNull(bestMigration.second()); + assertNull(bestMigration.first()); + } + @Test public void testSavePlan() { Mockito.when(drsPlanDao.persist(Mockito.any(ClusterDrsPlanVO.class))).thenReturn( diff --git 
a/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java b/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java index 679324fed2f..b5c842b8806 100644 --- a/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java +++ b/server/src/test/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinitionTest.java @@ -773,8 +773,8 @@ public class RouterDeploymentDefinitionTest extends RouterDeploymentDefinitionTe protected void driveTestPrepareDeployment(final boolean isRedundant, final boolean isPublicNw) { // Prepare when(mockNw.isRedundant()).thenReturn(isRedundant); - when(mockNetworkModel.isProviderSupportServiceInNetwork( - NW_ID_1, Service.SourceNat, Provider.VirtualRouter)).thenReturn(isPublicNw); + when(mockNetworkModel.isAnyServiceSupportedInNetwork( + NW_ID_1, Provider.VirtualRouter, Service.SourceNat, Service.Gateway)).thenReturn(isPublicNw); // Execute final boolean canProceedDeployment = deployment.prepareDeployment(); // Assert diff --git a/server/src/test/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinitionTest.java b/server/src/test/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinitionTest.java index a355ad21f2b..d3ab6d8904b 100644 --- a/server/src/test/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinitionTest.java +++ b/server/src/test/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinitionTest.java @@ -269,7 +269,7 @@ public class VpcRouterDeploymentDefinitionTest extends RouterDeploymentDefinitio public void testFindSourceNatIP() throws InsufficientAddressCapacityException, ConcurrentOperationException { // Prepare final PublicIp publicIp = mock(PublicIp.class); - when(vpcMgr.assignSourceNatIpAddressToVpc(mockOwner, mockVpc)).thenReturn(publicIp); + 
when(vpcMgr.assignSourceNatIpAddressToVpc(mockOwner, mockVpc, null)).thenReturn(publicIp); deployment.isPublicNetwork = true; // Execute diff --git a/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java b/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java index 838bb3dadcb..25b4bdda45f 100644 --- a/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java +++ b/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java @@ -138,7 +138,7 @@ public class CreateNetworkOfferingTest extends TestCase { public void createSharedNtwkOffWithVlan() { NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, Availability.Optional, 200, null, false, Network.GuestType.Shared, false, - null, false, null, true, false, null, false, null, true, false, false, false, null,null, null, false, null); + null, false, null, true, false, null, false, null, true, false, false, false, null,null, null, false, null, null, false); assertNotNull("Shared network offering with specifyVlan=true failed to create ", off); } @@ -146,7 +146,7 @@ public class CreateNetworkOfferingTest extends TestCase { public void createSharedNtwkOffWithNoVlan() { NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, false, Availability.Optional, 200, null, false, Network.GuestType.Shared, - false, null, false, null, true, false, null, false, null, true, false, false, false, null, null,null, false, null); + false, null, false, null, true, false, null, false, null, true, false, false, false, null, null,null, false, null, null, false); assertNotNull("Shared network offering with specifyVlan=false was created", off); } @@ -154,7 +154,7 @@ public class CreateNetworkOfferingTest extends TestCase { public void createSharedNtwkOffWithSpecifyIpRanges() { NetworkOfferingVO off = 
configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, Availability.Optional, 200, null, false, Network.GuestType.Shared, false, - null, false, null, true, false, null, false, null, true, false, false, false, null,null, null, false, null); + null, false, null, true, false, null, false, null, true, false, false, false, null,null, null, false, null, null, false); assertNotNull("Shared network offering with specifyIpRanges=true failed to create ", off); } @@ -163,7 +163,7 @@ public class CreateNetworkOfferingTest extends TestCase { public void createSharedNtwkOffWithoutSpecifyIpRanges() { NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, Availability.Optional, 200, null, false, Network.GuestType.Shared, - false, null, false, null, false, false, null, false, null, true, false, false, false, null,null, null, false, null); + false, null, false, null, false, false, null, false, null, true, false, false, false, null,null, null, false, null, null, false); assertNull("Shared network offering with specifyIpRanges=false was created", off); } @@ -176,7 +176,7 @@ public class CreateNetworkOfferingTest extends TestCase { serviceProviderMap.put(Network.Service.SourceNat, vrProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, Availability.Optional, 200, serviceProviderMap, false, - Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, false, false, false, null, null, null, false, null); + Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, false, false, false, null, null, null, false, null, null, false); assertNotNull("Isolated network offering with specifyIpRanges=false failed to create ", off); } @@ -189,7 +189,7 @@ public class CreateNetworkOfferingTest extends TestCase { serviceProviderMap.put(Network.Service.SourceNat, vrProvider); 
NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, Availability.Optional, 200, serviceProviderMap, false, - Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, false, false, false, null,null, null, false, null); + Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, false, false, false, null,null, null, false, null, null, false); assertNotNull("Isolated network offering with specifyVlan=true wasn't created", off); } @@ -202,7 +202,7 @@ public class CreateNetworkOfferingTest extends TestCase { serviceProviderMap.put(Network.Service.SourceNat, vrProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, Availability.Optional, 200, serviceProviderMap, false, - Network.GuestType.Isolated, false, null, false, null, true, false, null, false, null, true, false, false, false, null,null, null, false, null); + Network.GuestType.Isolated, false, null, false, null, true, false, null, false, null, true, false, false, false, null,null, null, false, null, null, false); assertNull("Isolated network offering with specifyIpRanges=true and source nat service enabled, was created", off); } @@ -213,7 +213,7 @@ public class CreateNetworkOfferingTest extends TestCase { Set vrProvider = new HashSet(); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, Availability.Optional, 200, serviceProviderMap, false, - Network.GuestType.Isolated, false, null, false, null, true, false, null, false, null, true, false, false, false, null,null, null, false, null); + Network.GuestType.Isolated, false, null, false, null, true, false, null, false, null, true, false, false, false, null,null, null, false, null, null, false); assertNotNull("Isolated network offering with specifyIpRanges=true and with no sourceNatService, failed to create", off); } 
@@ -231,7 +231,7 @@ public class CreateNetworkOfferingTest extends TestCase { serviceProviderMap.put(Network.Service.Lb, vrProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, Availability.Optional, 200, serviceProviderMap, false, - Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, true, false, false, null, null, null, false, null); + Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, true, false, false, null, null, null, false, null, null, false); // System.out.println("Creating Vpc Network Offering"); assertNotNull("Vpc Isolated network offering with Vpc provider ", off); } @@ -251,7 +251,7 @@ public class CreateNetworkOfferingTest extends TestCase { serviceProviderMap.put(Network.Service.Lb, lbProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, Availability.Optional, 200, serviceProviderMap, false, - Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, true, false, false, null, null, null, false, null); + Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true, true, false, false, null, null, null, false, null, null, false); // System.out.println("Creating Vpc Network Offering"); assertNotNull("Vpc Isolated network offering with Vpc and Netscaler provider ", off); } diff --git a/server/src/test/java/org/apache/cloudstack/resource/ResourceCleanupServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/resource/ResourceCleanupServiceImplTest.java new file mode 100644 index 00000000000..7446e290488 --- /dev/null +++ b/server/src/test/java/org/apache/cloudstack/resource/ResourceCleanupServiceImplTest.java @@ -0,0 +1,656 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.resource; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.cloudstack.api.command.admin.resource.PurgeExpungedResourcesCmd; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.jobs.dao.VmWorkJobDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; +import org.apache.commons.collections.CollectionUtils; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.ha.HighAvailabilityManager; +import com.cloud.network.as.dao.AutoScaleVmGroupVmMapDao; +import com.cloud.network.dao.IPAddressDao; +import com.cloud.network.dao.InlineLoadBalancerNicMapDao; +import 
com.cloud.network.dao.LoadBalancerVMMapDao; +import com.cloud.network.dao.OpRouterMonitorServiceDao; +import com.cloud.network.rules.dao.PortForwardingRulesDao; +import com.cloud.offering.ServiceOffering; +import com.cloud.secstorage.CommandExecLogDao; +import com.cloud.service.dao.ServiceOfferingDetailsDao; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotDetailsDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.Pair; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.ItWorkDao; +import com.cloud.vm.NicVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.ConsoleSessionDao; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicDetailsDao; +import com.cloud.vm.dao.NicExtraDhcpOptionDao; +import com.cloud.vm.dao.NicSecondaryIpDao; +import com.cloud.vm.dao.UserVmDetailsDao; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; + +@RunWith(MockitoJUnitRunner.class) +public class ResourceCleanupServiceImplTest { + + @Mock + VMInstanceDao vmInstanceDao; + @Mock + VolumeDao volumeDao; + @Mock + VolumeDetailsDao volumeDetailsDao; + @Mock + VolumeDataStoreDao volumeDataStoreDao; + @Mock + SnapshotDao snapshotDao; + @Mock + SnapshotDetailsDao snapshotDetailsDao; + @Mock + SnapshotDataStoreDao snapshotDataStoreDao; + @Mock + NicDao nicDao; + @Mock + NicDetailsDao nicDetailsDao; + @Mock + NicExtraDhcpOptionDao nicExtraDhcpOptionDao; + @Mock + InlineLoadBalancerNicMapDao inlineLoadBalancerNicMapDao; + @Mock + VMSnapshotDao vmSnapshotDao; + @Mock + VMSnapshotDetailsDao vmSnapshotDetailsDao; + @Mock + 
UserVmDetailsDao userVmDetailsDao; + @Mock + AutoScaleVmGroupVmMapDao autoScaleVmGroupVmMapDao; + @Mock + CommandExecLogDao commandExecLogDao; + @Mock + NetworkOrchestrationService networkOrchestrationService; + @Mock + LoadBalancerVMMapDao loadBalancerVMMapDao; + @Mock + NicSecondaryIpDao nicSecondaryIpDao; + @Mock + HighAvailabilityManager highAvailabilityManager; + @Mock + ItWorkDao itWorkDao; + @Mock + OpRouterMonitorServiceDao opRouterMonitorServiceDao; + @Mock + PortForwardingRulesDao portForwardingRulesDao; + @Mock + IPAddressDao ipAddressDao; + @Mock + VmWorkJobDao vmWorkJobDao; + @Mock + ConsoleSessionDao consoleSessionDao; + @Mock + ServiceOfferingDetailsDao serviceOfferingDetailsDao; + + @Spy + @InjectMocks + ResourceCleanupServiceImpl resourceCleanupService = Mockito.spy(new ResourceCleanupServiceImpl()); + + List ids = List.of(1L, 2L); + Long batchSize = 100L; + + private void overrideConfigValue(final ConfigKey configKey, final Object value) { + try { + Field f = ConfigKey.class.getDeclaredField("_value"); + f.setAccessible(true); + f.set(configKey, value); + } catch (IllegalAccessException | NoSuchFieldException e) { + Assert.fail(e.getMessage()); + } + } + + + @Test + public void testPurgeLinkedSnapshotEntitiesNoSnapshots() { + resourceCleanupService.purgeLinkedSnapshotEntities(new ArrayList<>(), batchSize); + Mockito.verify(snapshotDetailsDao, Mockito.never()) + .batchExpungeForResources(Mockito.anyList(), Mockito.anyLong()); + Mockito.verify(snapshotDataStoreDao, Mockito.never()) + .expungeBySnapshotList(Mockito.anyList(), Mockito.anyLong()); + } + + + @Test + public void testPurgeLinkedSnapshotEntities() { + Mockito.when(snapshotDetailsDao.batchExpungeForResources(ids, batchSize)).thenReturn(2L); + Mockito.when(snapshotDataStoreDao.expungeBySnapshotList(ids, batchSize)).thenReturn(2); + resourceCleanupService.purgeLinkedSnapshotEntities(ids, batchSize); + Mockito.verify(snapshotDetailsDao, Mockito.times(1)) + .batchExpungeForResources(ids, 
batchSize); + Mockito.verify(snapshotDataStoreDao, Mockito.times(1)) + .expungeBySnapshotList(ids, batchSize); + } + + @Test + public void testPurgeVolumeSnapshotsNoVolumes() { + Assert.assertEquals(0, resourceCleanupService.purgeVolumeSnapshots(new ArrayList<>(), 50L)); + Mockito.verify(snapshotDao, Mockito.never()).createSearchBuilder(); + } + + @Test + public void testPurgeVolumeSnapshots() { + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + Mockito.when(sb.entity()).thenReturn(Mockito.mock(SnapshotVO.class)); + Mockito.when(sb.create()).thenReturn(Mockito.mock(SearchCriteria.class)); + Mockito.when(snapshotDao.createSearchBuilder()).thenReturn(sb); + Assert.assertEquals(0, resourceCleanupService.purgeVolumeSnapshots(new ArrayList<>(), 50L)); + Mockito.when(snapshotDao.searchIncludingRemoved(Mockito.any(), Mockito.any(), + Mockito.any(), Mockito.anyBoolean())) + .thenReturn(List.of(Mockito.mock(SnapshotVO.class), Mockito.mock(SnapshotVO.class))); + Mockito.when(snapshotDao.expungeList(Mockito.anyList())).thenReturn(2); + Assert.assertEquals(2, resourceCleanupService.purgeVolumeSnapshots(ids, batchSize)); + } + + @Test + public void testPurgeLinkedVolumeEntitiesNoVolumes() { + resourceCleanupService.purgeLinkedVolumeEntities(new ArrayList<>(), 50L); + Mockito.verify(volumeDetailsDao, Mockito.never()).batchExpungeForResources(Mockito.anyList(), + Mockito.anyLong()); + } + + @Test + public void testPurgeLinkedVolumeEntities() { + Mockito.when(volumeDetailsDao.batchExpungeForResources(ids, batchSize)).thenReturn(2L); + Mockito.when(volumeDataStoreDao.expungeByVolumeList(ids, batchSize)).thenReturn(2); + Mockito.doReturn(2L).when(resourceCleanupService).purgeVolumeSnapshots(ids, batchSize); + resourceCleanupService.purgeLinkedVolumeEntities(ids, batchSize); + Mockito.verify(volumeDetailsDao, Mockito.times(1)) + .batchExpungeForResources(ids, batchSize); + Mockito.verify(volumeDataStoreDao, Mockito.times(1)) + .expungeByVolumeList(ids, batchSize); + 
Mockito.verify(resourceCleanupService, Mockito.times(1)) + .purgeVolumeSnapshots(ids, batchSize); + } + + @Test + public void testPurgeVMVolumesNoVms() { + Assert.assertEquals(0, resourceCleanupService.purgeVMVolumes(new ArrayList<>(), 50L)); + Mockito.verify(volumeDao, Mockito.never()).searchRemovedByVms(Mockito.anyList(), Mockito.anyLong()); + } + + @Test + public void testPurgeVMVolumes() { + Mockito.when(volumeDao.searchRemovedByVms(ids, batchSize)) + .thenReturn(List.of(Mockito.mock(VolumeVO.class), Mockito.mock(VolumeVO.class))); + Mockito.when(volumeDao.expungeList(Mockito.anyList())).thenReturn(2); + Mockito.doNothing().when(resourceCleanupService).purgeLinkedVolumeEntities(Mockito.anyList(), + Mockito.eq(batchSize)); + Assert.assertEquals(2, resourceCleanupService.purgeVMVolumes(ids, batchSize)); + } + + @Test + public void testPurgeLinkedNicEntitiesNoNics() { + resourceCleanupService.purgeLinkedNicEntities(new ArrayList<>(), batchSize); + Mockito.verify(nicDetailsDao, Mockito.never()) + .batchExpungeForResources(ids, batchSize); + Mockito.verify(nicExtraDhcpOptionDao, Mockito.never()) + .expungeByNicList(ids, batchSize); + Mockito.verify(inlineLoadBalancerNicMapDao, Mockito.never()) + .expungeByNicList(ids, batchSize); + } + + @Test + public void testPurgeLinkedNicEntities() { + Mockito.when(nicDetailsDao.batchExpungeForResources(ids, batchSize)).thenReturn(2L); + Mockito.when(nicExtraDhcpOptionDao.expungeByNicList(ids, batchSize)).thenReturn(2); + Mockito.when(inlineLoadBalancerNicMapDao.expungeByNicList(ids, batchSize)).thenReturn(2); + resourceCleanupService.purgeLinkedNicEntities(ids, batchSize); + Mockito.verify(nicDetailsDao, Mockito.times(1)) + .batchExpungeForResources(ids, batchSize); + Mockito.verify(nicExtraDhcpOptionDao, Mockito.times(1)) + .expungeByNicList(ids, batchSize); + Mockito.verify(inlineLoadBalancerNicMapDao, Mockito.times(1)) + .expungeByNicList(ids, batchSize); + } + + @Test + public void testPurgeVMNicsNoVms() { + 
Assert.assertEquals(0, resourceCleanupService.purgeVMNics(new ArrayList<>(), 50L)); + Mockito.verify(nicDao, Mockito.never()).searchRemovedByVms(Mockito.anyList(), Mockito.anyLong()); + } + + @Test + public void testPurgeVMNics() { + Mockito.when(nicDao.searchRemovedByVms(ids, batchSize)) + .thenReturn(List.of(Mockito.mock(NicVO.class), Mockito.mock(NicVO.class))); + Mockito.when(nicDao.expungeList(Mockito.anyList())).thenReturn(2); + Mockito.doNothing().when(resourceCleanupService).purgeLinkedNicEntities(Mockito.anyList(), + Mockito.eq(batchSize)); + Assert.assertEquals(2, resourceCleanupService.purgeVMNics(ids, batchSize)); + } + + @Test + public void testPurgeVMSnapshotsNoVms() { + Assert.assertEquals(0, resourceCleanupService.purgeVMSnapshots(new ArrayList<>(), 50L)); + Mockito.verify(vmSnapshotDao, Mockito.never()).searchRemovedByVms(Mockito.anyList(), Mockito.anyLong()); + } + + @Test + public void testPurgeVMSnapshots() { + Mockito.when(vmSnapshotDao.searchRemovedByVms(ids, batchSize)) + .thenReturn(List.of(Mockito.mock(VMSnapshotVO.class), Mockito.mock(VMSnapshotVO.class))); + Mockito.when(vmSnapshotDao.expungeList(Mockito.anyList())).thenReturn(2); + Mockito.when(vmSnapshotDetailsDao.batchExpungeForResources(Mockito.anyList(), + Mockito.eq(batchSize))).thenReturn(2L); + Assert.assertEquals(2, resourceCleanupService.purgeVMSnapshots(ids, batchSize)); + } + + @Test + public void testPurgeLinkedVMEntitiesNoVms() { + resourceCleanupService.purgeLinkedVMEntities(new ArrayList<>(), 50L); + Mockito.verify(resourceCleanupService, Mockito.never()).purgeVMVolumes(Mockito.anyList(), + Mockito.anyLong()); + Mockito.verify(userVmDetailsDao, Mockito.never()) + .batchExpungeForResources(Mockito.anyList(), Mockito.anyLong()); + } + + @Test + public void testPurgeLinkedVMEntities() { + Mockito.doReturn(2L).when(resourceCleanupService).purgeVMVolumes(Mockito.anyList(), + Mockito.eq(batchSize)); + 
Mockito.doReturn(2L).when(resourceCleanupService).purgeVMNics(Mockito.anyList(), + Mockito.eq(batchSize)); + Mockito.when(userVmDetailsDao.batchExpungeForResources(Mockito.anyList(), Mockito.anyLong())).thenReturn(2L); + Mockito.doReturn(2L).when(resourceCleanupService).purgeVMSnapshots(Mockito.anyList(), + Mockito.eq(batchSize)); + Mockito.when(autoScaleVmGroupVmMapDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(commandExecLogDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(loadBalancerVMMapDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(nicSecondaryIpDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(highAvailabilityManager.expungeWorkItemsByVmList(Mockito.anyList(), Mockito.anyLong())) + .thenReturn(2); + Mockito.when(itWorkDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(opRouterMonitorServiceDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(portForwardingRulesDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(ipAddressDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(vmWorkJobDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + Mockito.when(consoleSessionDao.expungeByVmList(Mockito.anyList(), Mockito.anyLong())).thenReturn(2); + + resourceCleanupService.purgeLinkedVMEntities(ids, batchSize); + + Mockito.verify(resourceCleanupService, Mockito.times(1)).purgeVMVolumes(ids, batchSize); + Mockito.verify(resourceCleanupService, Mockito.times(1)).purgeVMNics(ids, batchSize); + Mockito.verify(userVmDetailsDao, Mockito.times(1)) + .batchExpungeForResources(ids, batchSize); + Mockito.verify(resourceCleanupService, Mockito.times(1)) + .purgeVMSnapshots(ids, batchSize); + Mockito.verify(autoScaleVmGroupVmMapDao, Mockito.times(1)) + 
.expungeByVmList(ids, batchSize); + Mockito.verify(commandExecLogDao, Mockito.times(1)).expungeByVmList(ids, batchSize); + Mockito.verify(loadBalancerVMMapDao, Mockito.times(1)).expungeByVmList(ids, batchSize); + Mockito.verify(nicSecondaryIpDao, Mockito.times(1)).expungeByVmList(ids, batchSize); + Mockito.verify(highAvailabilityManager, Mockito.times(1)). + expungeWorkItemsByVmList(ids, batchSize); + Mockito.verify(itWorkDao, Mockito.times(1)).expungeByVmList(ids, batchSize); + Mockito.verify(opRouterMonitorServiceDao, Mockito.times(1)) + .expungeByVmList(ids, batchSize); + Mockito.verify(portForwardingRulesDao, Mockito.times(1)).expungeByVmList(ids, batchSize); + Mockito.verify(ipAddressDao, Mockito.times(1)).expungeByVmList(ids, batchSize); + Mockito.verify(vmWorkJobDao, Mockito.times(1)).expungeByVmList(ids, batchSize); + Mockito.verify(consoleSessionDao, Mockito.times(1)).expungeByVmList(ids, batchSize); + } + + @Test + public void testGetVmIdsWithActiveVolumeSnapshotsNoVms() { + Assert.assertTrue(CollectionUtils.isEmpty( + resourceCleanupService.getVmIdsWithActiveVolumeSnapshots(new ArrayList<>()))); + } + + @Test + public void testGetVmIdsWithActiveVolumeSnapshots() { + VolumeVO vol1 = Mockito.mock(VolumeVO.class); + Mockito.when(vol1.getId()).thenReturn(1L); + Mockito.when(vol1.getInstanceId()).thenReturn(1L); + VolumeVO vol2 = Mockito.mock(VolumeVO.class); + Mockito.when(vol2.getId()).thenReturn(2L); + Mockito.when(volumeDao.searchRemovedByVms(ids, null)).thenReturn(List.of(vol1, vol2)); + SnapshotVO snapshotVO = Mockito.mock(SnapshotVO.class); + Mockito.when(snapshotVO.getVolumeId()).thenReturn(1L); + Mockito.when(snapshotDao.searchByVolumes(Mockito.anyList())).thenReturn(List.of(snapshotVO)); + HashSet vmIds = resourceCleanupService.getVmIdsWithActiveVolumeSnapshots(ids); + Assert.assertTrue(CollectionUtils.isNotEmpty(vmIds)); + Assert.assertEquals(1, vmIds.size()); + Assert.assertEquals(1L, vmIds.toArray()[0]); + } + + @Test + public void 
testGetFilteredVmIdsForSnapshots() { + Long skippedVmIds = ids.get(0); + Long notSkippedVmIds = ids.get(1); + VMSnapshotVO vmSnapshotVO = Mockito.mock(VMSnapshotVO.class); + Mockito.when(vmSnapshotVO.getVmId()).thenReturn(1L); + Mockito.when(vmSnapshotDao.searchByVms(Mockito.anyList())).thenReturn(List.of(vmSnapshotVO)); + HashSet set = new HashSet<>(); + set.add(1L); + Mockito.doReturn(set).when(resourceCleanupService).getVmIdsWithActiveVolumeSnapshots(ids); + Pair, List> result = resourceCleanupService.getFilteredVmIdsForSnapshots(new ArrayList<>(ids)); + Assert.assertEquals(1, result.first().size()); + Assert.assertEquals(1, result.second().size()); + Assert.assertEquals(notSkippedVmIds, result.first().get(0)); + Assert.assertEquals(skippedVmIds, result.second().get(0)); + } + + @Test + public void testGetVmIdsWithNoActiveSnapshots() { + VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm1.getId()).thenReturn(ids.get(0)); + VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm2.getId()).thenReturn(ids.get(1)); + Mockito.when(vmInstanceDao.searchRemovedByRemoveDate(Mockito.any(), Mockito.any(), + Mockito.anyLong(), Mockito.anyList())).thenReturn(List.of(vm1, vm2)); + Long skippedVmIds = ids.get(0); + Long notSkippedVmIds = ids.get(1); + VMSnapshotVO vmSnapshotVO = Mockito.mock(VMSnapshotVO.class); + Mockito.when(vmSnapshotVO.getVmId()).thenReturn(1L); + Mockito.when(vmSnapshotDao.searchByVms(Mockito.anyList())).thenReturn(List.of(vmSnapshotVO)); + HashSet set = new HashSet<>(); + set.add(1L); + Mockito.doReturn(set).when(resourceCleanupService).getVmIdsWithActiveVolumeSnapshots(Mockito.anyList()); + Pair, List> result = + resourceCleanupService.getVmIdsWithNoActiveSnapshots(new Date(), new Date(), batchSize, + new ArrayList<>()); + Assert.assertEquals(1, result.first().size()); + Assert.assertEquals(1, result.second().size()); + Assert.assertEquals(notSkippedVmIds, result.first().get(0)); + Assert.assertEquals(skippedVmIds, 
result.second().get(0)); + } + + @Test + public void testPurgeVMEntitiesNoVms() { + Mockito.when(vmInstanceDao.searchRemovedByRemoveDate(Mockito.any(), Mockito.any(), + Mockito.anyLong(), Mockito.anyList())).thenReturn(new ArrayList<>()); + Assert.assertEquals(0, resourceCleanupService.purgeVMEntities(batchSize, new Date(), new Date())); + } + + @Test + public void testPurgeVMEntities() { + Mockito.doReturn(new Pair<>(ids, new ArrayList<>())).when(resourceCleanupService) + .getVmIdsWithNoActiveSnapshots(Mockito.any(), Mockito.any(), Mockito.anyLong(), Mockito.anyList()); + Mockito.when(vmInstanceDao.expungeList(ids)).thenReturn(ids.size()); + Assert.assertEquals(ids.size(), resourceCleanupService.purgeVMEntities(batchSize, new Date(), new Date())); + } + + @Test + public void testExpungeVMEntityFiltered() { + Mockito.doReturn(new Pair<>(new ArrayList<>(), List.of(ids.get(0)))).when(resourceCleanupService) + .getFilteredVmIdsForSnapshots(Mockito.anyList()); + Assert.assertFalse(resourceCleanupService.purgeVMEntity(ids.get(0))); + } + + @Test + public void testPurgeVMEntityFiltered() { + Mockito.doReturn(new Pair<>(List.of(ids.get(0)), new ArrayList<>())).when(resourceCleanupService) + .getFilteredVmIdsForSnapshots(Mockito.anyList()); + Mockito.doNothing().when(resourceCleanupService) + .purgeLinkedVMEntities(Mockito.anyList(), Mockito.anyLong()); + Mockito.when(vmInstanceDao.expunge(ids.get(0))).thenReturn(true); + Assert.assertTrue(resourceCleanupService.purgeVMEntity(ids.get(0))); + } + + @Test + public void testPurgeVMEntity() { + Mockito.doReturn(new Pair<>(List.of(ids.get(0)), new ArrayList<>())).when(resourceCleanupService) + .getFilteredVmIdsForSnapshots(Mockito.anyList()); + Mockito.doNothing().when(resourceCleanupService) + .purgeLinkedVMEntities(Mockito.anyList(), Mockito.anyLong()); + Mockito.when(vmInstanceDao.expunge(ids.get(0))).thenReturn(true); + Assert.assertTrue(resourceCleanupService.purgeVMEntity(ids.get(0))); + } + + @Test + public void 
testPurgeEntities() { + Mockito.doReturn((long)ids.size()).when(resourceCleanupService) + .purgeVMEntities(Mockito.anyLong(), Mockito.any(), Mockito.any()); + long result = resourceCleanupService.purgeEntities( + List.of(ResourceCleanupService.ResourceType.VirtualMachine), batchSize, new Date(), new Date()); + Assert.assertEquals(ids.size(), result); + } + + @Test(expected = InvalidParameterValueException.class) + public void testGetResourceTypeAndValidatePurgeExpungedResourcesCmdParamsInvalidResourceType() { + resourceCleanupService.getResourceTypeAndValidatePurgeExpungedResourcesCmdParams("Volume", + new Date(), new Date(), batchSize); + } + + @Test(expected = InvalidParameterValueException.class) + public void testGetResourceTypeAndValidatePurgeExpungedResourcesCmdParamsInvalidBatchSize() { + resourceCleanupService.getResourceTypeAndValidatePurgeExpungedResourcesCmdParams( + ResourceCleanupService.ResourceType.VirtualMachine.toString(), + new Date(), new Date(), -1L); + } + + @Test(expected = InvalidParameterValueException.class) + public void testGetResourceTypeAndValidatePurgeExpungedResourcesCmdParamsInvalidDates() { + Calendar cal = Calendar.getInstance(); + Date startDate = new Date(); + cal.setTime(startDate); + cal.add(Calendar.DATE, -1); + Date endDate = cal.getTime(); + resourceCleanupService.getResourceTypeAndValidatePurgeExpungedResourcesCmdParams( + ResourceCleanupService.ResourceType.VirtualMachine.toString(), + startDate, endDate, 100L); + } + + @Test + public void testGetResourceTypeAndValidatePurgeExpungedResourcesCmdParams() { + Calendar cal = Calendar.getInstance(); + Date endDate = new Date(); + cal.setTime(endDate); + cal.add(Calendar.DATE, -1); + Date startDate = cal.getTime(); + ResourceCleanupService.ResourceType type = + resourceCleanupService.getResourceTypeAndValidatePurgeExpungedResourcesCmdParams( + ResourceCleanupService.ResourceType.VirtualMachine.toString(), + startDate, endDate, 100L); + 
Assert.assertEquals(ResourceCleanupService.ResourceType.VirtualMachine, type); + } + + @Test + public void testGetResourceTypeAndValidatePurgeExpungedResourcesCmdParamsNoValues() { + ResourceCleanupService.ResourceType type = + resourceCleanupService.getResourceTypeAndValidatePurgeExpungedResourcesCmdParams( + null, null, null, null); + Assert.assertNull(type); + } + + @Test + public void testIsVmOfferingPurgeResourcesEnabled() { + Mockito.when(serviceOfferingDetailsDao.getDetail(1L, + ServiceOffering.PURGE_DB_ENTITIES_KEY)).thenReturn(null); + Assert.assertFalse(resourceCleanupService.isVmOfferingPurgeResourcesEnabled(1L)); + Mockito.when(serviceOfferingDetailsDao.getDetail(2L, + ServiceOffering.PURGE_DB_ENTITIES_KEY)).thenReturn("false"); + Assert.assertFalse(resourceCleanupService.isVmOfferingPurgeResourcesEnabled(2L)); + Mockito.when(serviceOfferingDetailsDao.getDetail(3L, + ServiceOffering.PURGE_DB_ENTITIES_KEY)).thenReturn("true"); + Assert.assertTrue(resourceCleanupService.isVmOfferingPurgeResourcesEnabled(3L)); + } + + @Test + public void testPurgeExpungedResource() { + Assert.assertFalse(resourceCleanupService.purgeExpungedResource(1L, null)); + + Mockito.doReturn(true).when(resourceCleanupService) + .purgeExpungedResource(Mockito.anyLong(), Mockito.any()); + Assert.assertTrue(resourceCleanupService.purgeExpungedResource(1L, + ResourceCleanupService.ResourceType.VirtualMachine)); + } + + @Test(expected = InvalidParameterValueException.class) + public void testPurgeExpungedResourcesInvalidResourceType() { + PurgeExpungedResourcesCmd cmd = Mockito.mock(PurgeExpungedResourcesCmd.class); + Mockito.when(cmd.getResourceType()).thenReturn("Volume"); + resourceCleanupService.purgeExpungedResources(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testPurgeExpungedResourcesInvalidBatchSize() { + PurgeExpungedResourcesCmd cmd = Mockito.mock(PurgeExpungedResourcesCmd.class); + Mockito.when(cmd.getBatchSize()).thenReturn(-1L); + 
resourceCleanupService.purgeExpungedResources(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testPurgeExpungedResourcesInvalidDates() { + Calendar cal = Calendar.getInstance(); + Date startDate = new Date(); + cal.setTime(startDate); + cal.add(Calendar.DATE, -1); + Date endDate = cal.getTime(); + PurgeExpungedResourcesCmd cmd = Mockito.mock(PurgeExpungedResourcesCmd.class); + Mockito.when(cmd.getStartDate()).thenReturn(startDate); + Mockito.when(cmd.getEndDate()).thenReturn(endDate); + resourceCleanupService.purgeExpungedResources(cmd); + } + + @Test + public void testPurgeExpungedResources() { + Mockito.doReturn((long)ids.size()).when(resourceCleanupService).purgeExpungedResourceUsingJob( + ResourceCleanupService.ResourceType.VirtualMachine, batchSize, null, null); + PurgeExpungedResourcesCmd cmd = Mockito.mock(PurgeExpungedResourcesCmd.class); + Mockito.when(cmd.getResourceType()).thenReturn(ResourceCleanupService.ResourceType.VirtualMachine.toString()); + Mockito.when(cmd.getBatchSize()).thenReturn(batchSize); + long result = resourceCleanupService.purgeExpungedResources(cmd); + Assert.assertEquals(ids.size(), result); + } + + @Test + public void testExpungedVmResourcesLaterIfNeededFalse() { + VirtualMachine vm = Mockito.mock(VirtualMachine.class); + Mockito.when(vm.getServiceOfferingId()).thenReturn(1L); + Mockito.doReturn(false).when(resourceCleanupService).isVmOfferingPurgeResourcesEnabled(1L); + resourceCleanupService.purgeExpungedVmResourcesLaterIfNeeded(vm); + Mockito.verify(resourceCleanupService, Mockito.never()).purgeExpungedResourceLater(Mockito.anyLong(), Mockito.any()); + } + + @Test + public void testExpungedVmResourcesLaterIfNeeded() { + VirtualMachine vm = Mockito.mock(VirtualMachine.class); + Mockito.when(vm.getServiceOfferingId()).thenReturn(1L); + Mockito.doReturn(true).when(resourceCleanupService).isVmOfferingPurgeResourcesEnabled(1L); + 
Mockito.doNothing().when(resourceCleanupService).purgeExpungedResourceLater(Mockito.anyLong(), Mockito.any()); + resourceCleanupService.purgeExpungedVmResourcesLaterIfNeeded(vm); + Mockito.verify(resourceCleanupService, Mockito.times(1)) + .purgeExpungedResourceLater(Mockito.anyLong(), Mockito.any()); + } + + @Test + public void testGetBatchSizeFromConfig() { + int value = 50; + overrideConfigValue(ResourceCleanupService.ExpungedResourcesPurgeBatchSize, String.valueOf(value)); + Assert.assertEquals(value, resourceCleanupService.getBatchSizeFromConfig()); + } + + @Test + public void testGetResourceTypesFromConfigEmpty() { + overrideConfigValue(ResourceCleanupService.ExpungedResourcePurgeResources, ""); + Assert.assertNull(resourceCleanupService.getResourceTypesFromConfig()); + } + + @Test + public void testGetResourceTypesFromConfig() { + overrideConfigValue(ResourceCleanupService.ExpungedResourcePurgeResources, "VirtualMachine"); + List types = resourceCleanupService.getResourceTypesFromConfig(); + Assert.assertEquals(1, types.size()); + } + + @Test + public void testCalculatePastDateFromConfigNull() { + Assert.assertNull(resourceCleanupService.calculatePastDateFromConfig( + ResourceCleanupService.ExpungedResourcesPurgeKeepPastDays.key(), + null)); + Assert.assertNull(resourceCleanupService.calculatePastDateFromConfig( + ResourceCleanupService.ExpungedResourcesPurgeKeepPastDays.key(), + 0)); + } + + @Test(expected = CloudRuntimeException.class) + public void testCalculatePastDateFromConfigFail() { + Assert.assertNull(resourceCleanupService.calculatePastDateFromConfig( + ResourceCleanupService.ExpungedResourcesPurgeKeepPastDays.key(), + -1)); + } + + @Test + public void testCalculatePastDateFromConfig() { + int days = 10; + Date result = resourceCleanupService.calculatePastDateFromConfig( + ResourceCleanupService.ExpungedResourcesPurgeKeepPastDays.key(), + days); + Date today = new Date(); + long diff = today.getTime() - result.getTime(); + Assert.assertEquals(days, 
TimeUnit.DAYS.convert(diff, TimeUnit.MILLISECONDS)); + } + + @Test + public void testParseDateFromConfig() { + Assert.assertNull(resourceCleanupService.parseDateFromConfig( + ResourceCleanupService.ExpungedResourcesPurgeStartTime.key(), "")); + Date date = resourceCleanupService.parseDateFromConfig( + ResourceCleanupService.ExpungedResourcesPurgeStartTime.key(), "2020-01-01"); + Assert.assertNotNull(date); + Calendar calendar = Calendar.getInstance(); + calendar.setTime(date); + Assert.assertEquals(2020, calendar.get(Calendar.YEAR)); + Assert.assertEquals(0, calendar.get(Calendar.MONTH)); + Assert.assertEquals(1, calendar.get(Calendar.DATE)); + } + + @Test(expected = CloudRuntimeException.class) + public void testParseDateFromConfigFail() { + resourceCleanupService.parseDateFromConfig( + ResourceCleanupService.ExpungedResourcesPurgeStartTime.key(), "ABC"); + } +} diff --git a/server/src/test/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImplTest.java new file mode 100644 index 00000000000..88493d10038 --- /dev/null +++ b/server/src/test/java/org/apache/cloudstack/storage/sharedfs/SharedFSServiceImplTest.java @@ -0,0 +1,665 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.sharedfs; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ChangeSharedFSDiskOfferingCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ChangeSharedFSServiceOfferingCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.CreateSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.DestroySharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.ListSharedFSCmd; +import org.apache.cloudstack.api.command.user.storage.sharedfs.UpdateSharedFSCmd; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.sharedfs.dao.SharedFSDao; +import org.apache.cloudstack.storage.sharedfs.query.dao.SharedFSJoinDao; +import org.apache.cloudstack.storage.sharedfs.query.vo.SharedFSJoinVO; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.configuration.ConfigurationManager; +import com.cloud.dc.DataCenterVO; +import 
com.cloud.dc.dao.DataCenterDao; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.VirtualMachineMigrationException; +import com.cloud.network.Network; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.org.Grouping; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.VolumeApiService; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.utils.Pair; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.NicVO; +import com.cloud.vm.dao.NicDao; + +@RunWith(MockitoJUnitRunner.class) +public class SharedFSServiceImplTest { + + @Mock + private AccountManager accountMgr; + + @Mock + private SharedFSDao sharedFSDao; + + @Mock + private SharedFSJoinDao sharedFSJoinDao; + + @Mock + private DataCenterDao dataCenterDao; + + @Mock + private DiskOfferingDao diskOfferingDao; + + @Mock + VolumeDao volumeDao; + + @Mock + NicDao nicDao; + + @Mock + NetworkDao networkDao; + + @Mock + private ConfigurationManager configMgr; + + @Mock + private VolumeApiService volumeApiService; + + @Mock + private SharedFSProvider provider; + + @Mock + private SharedFSLifeCycle lifeCycle; + + @Spy + @InjectMocks + private SharedFSServiceImpl sharedFSServiceImpl; + + 
private static final long s_ownerId = 1L; + private static final long s_zoneId = 2L; + private static final long s_diskOfferingId = 3L; + private static final long s_serviceOfferingId = 4L; + private static final long s_domainId = 5L; + private static final long s_volumeId = 6L; + private static final long s_vmId = 7L; + private static final long s_networkId = 8L; + private static final long s_sharedFSId = 9L; + private static final long s_size = 10L; + private static final long s_minIops = 1000L; + private static final long s_maxIops = 2000L; + private static final String s_providerName = "SHAREDFSVM"; + private static final String s_fsFormat = "EXT4"; + private static final String s_name = "TestSharedFS"; + private static final String s_description = "Test Description"; + + @Mock + Account owner; + @Mock + protected StateMachine2 _stateMachine; + + private MockedStatic callContextMocked; + + private AutoCloseable closeable; + + @Before + public void setUp() { + closeable = MockitoAnnotations.openMocks(this); + callContextMocked = mockStatic(CallContext.class); + CallContext callContextMock = mock(CallContext.class); + callContextMocked.when(CallContext::current).thenReturn(callContextMock); + when(callContextMock.getCallingAccount()).thenReturn(owner); + when(accountMgr.getActiveAccountById(s_ownerId)).thenReturn(owner); + + Map mockProviderMap = new HashMap<>(); + mockProviderMap.put(s_providerName, provider); + ReflectionTestUtils.setField(sharedFSServiceImpl, "sharedFSProviderMap", mockProviderMap); + when(sharedFSServiceImpl.getSharedFSProvider(s_providerName)).thenReturn(provider); + when(provider.getSharedFSLifeCycle()).thenReturn(lifeCycle); + ReflectionTestUtils.setField(sharedFSServiceImpl, "sharedFSStateMachine", _stateMachine); + } + + @After + public void tearDown() throws Exception { + callContextMocked.close(); + closeable.close(); + } + + private CreateSharedFSCmd getMockCreateSharedFSCmd() { + CreateSharedFSCmd cmd = mock(CreateSharedFSCmd.class); 
+ when(cmd.getEntityOwnerId()).thenReturn(s_ownerId); + when(cmd.getZoneId()).thenReturn(s_zoneId); + when(cmd.getDiskOfferingId()).thenReturn(s_diskOfferingId); + when(cmd.getSize()).thenReturn(s_size); + when(cmd.getMinIops()).thenReturn(s_minIops); + when(cmd.getMaxIops()).thenReturn(s_maxIops); + when(cmd.getSharedFSProviderName()).thenReturn(s_providerName); + when(cmd.getServiceOfferingId()).thenReturn(s_serviceOfferingId); + when(cmd.getNetworkId()).thenReturn(s_networkId); + when(cmd.getFsFormat()).thenReturn(s_fsFormat); + return cmd; + } + + private SharedFSVO getMockSharedFS() { + SharedFSVO sharedFS = new SharedFSVO(s_name, s_description, s_domainId, s_ownerId, s_zoneId, + s_providerName, SharedFS.Protocol.NFS, SharedFS.FileSystemType.valueOf(s_fsFormat), s_serviceOfferingId); + return sharedFS; + } + + @Test + public void testDeploySharedFS() throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, NoTransitionException, OperationTimedoutException { + CreateSharedFSCmd cmd = getMockCreateSharedFSCmd(); + + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(0L)).thenReturn(sharedFS); + + Pair result = new Pair<>(s_volumeId, s_vmId); + when(lifeCycle.deploySharedFS(sharedFS, s_networkId, s_diskOfferingId, s_size, s_minIops, s_maxIops)).thenReturn(result); + when(sharedFSDao.update(sharedFS.getId(), sharedFS)).thenReturn(true); + + Assert.assertEquals(sharedFSServiceImpl.deploySharedFS(cmd), sharedFS); + Assert.assertEquals(Optional.ofNullable(sharedFS.getVmId()), Optional.ofNullable(s_vmId)); + Assert.assertEquals(Optional.ofNullable(sharedFS.getVolumeId()), Optional.ofNullable(s_volumeId)); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationSucceeded, null, sharedFSDao); + } + + @Test + public void testDeploySharedFSException() throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, NoTransitionException, 
OperationTimedoutException { + CreateSharedFSCmd cmd = getMockCreateSharedFSCmd(); + + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(0L)).thenReturn(sharedFS); + + when(lifeCycle.deploySharedFS(sharedFS, s_networkId, s_diskOfferingId, s_size, s_minIops, s_maxIops)).thenThrow(new CloudRuntimeException("")); + + Assert.assertThrows(CloudRuntimeException.class, () -> sharedFSServiceImpl.deploySharedFS(cmd)); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationFailed, null, sharedFSDao); + verify(_stateMachine, never()).transitTo(sharedFS, SharedFS.Event.OperationSucceeded, null, sharedFSDao); + } + + @Test + public void testAllocSharedFS() throws NoTransitionException { + CreateSharedFSCmd cmd = getMockCreateSharedFSCmd(); + + when(dataCenterDao.findById(s_zoneId)).thenReturn(null); + Assert.assertThrows(InvalidParameterValueException.class, () -> sharedFSServiceImpl.allocSharedFS(cmd)); + + DataCenterVO zone = mock(DataCenterVO.class); + when(dataCenterDao.findById(s_zoneId)).thenReturn(zone); + when(zone.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + DiskOfferingVO diskOfferingVO = mock(DiskOfferingVO.class); + when(diskOfferingDao.findById(s_diskOfferingId)).thenReturn(diskOfferingVO); + when(diskOfferingVO.isCustomized()).thenReturn(true); + when(diskOfferingVO.isCustomizedIops()).thenReturn(true); + + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "id", s_sharedFSId); + + when(cmd.getNetworkId()).thenReturn(s_networkId); + NetworkVO networkVO = mock(NetworkVO.class); + when(networkVO.getGuestType()).thenReturn(Network.GuestType.Isolated); + when(networkDao.findById(s_networkId)).thenReturn(networkVO); + + sharedFSServiceImpl.allocSharedFS(cmd); + Assert.assertEquals(Optional.ofNullable(sharedFS.getAccountId()), Optional.ofNullable(s_ownerId)); + Assert.assertEquals(Optional.ofNullable(sharedFS.getDataCenterId()), Optional.ofNullable(s_zoneId)); + 
Assert.assertEquals(Optional.ofNullable(sharedFS.getServiceOfferingId()), Optional.ofNullable(s_serviceOfferingId)); + } + + @Test + public void testAllocSharedFSInvalidZone() { + CreateSharedFSCmd cmd = getMockCreateSharedFSCmd(); + + when(dataCenterDao.findById(s_zoneId)).thenReturn(null); + Assert.assertThrows(InvalidParameterValueException.class, () -> sharedFSServiceImpl.allocSharedFS(cmd)); + + DataCenterVO zone = mock(DataCenterVO.class); + when(dataCenterDao.findById(s_zoneId)).thenReturn(zone); + when(zone.getAllocationState()).thenReturn(Grouping.AllocationState.Disabled); + Assert.assertThrows(PermissionDeniedException.class, () -> sharedFSServiceImpl.allocSharedFS(cmd)); + + when(zone.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + when(zone.isSecurityGroupEnabled()).thenReturn(true); + Assert.assertThrows(PermissionDeniedException.class, () -> sharedFSServiceImpl.allocSharedFS(cmd)); + } + + @Test + public void tesAllocSharedFSInvalidDiskOffering() { + CreateSharedFSCmd cmd = getMockCreateSharedFSCmd(); + + DataCenterVO zone = mock(DataCenterVO.class); + when(dataCenterDao.findById(s_zoneId)).thenReturn(zone); + when(zone.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + DiskOfferingVO diskOfferingVO = mock(DiskOfferingVO.class); + when(diskOfferingDao.findById(s_diskOfferingId)).thenReturn(diskOfferingVO); + when(diskOfferingVO.isCustomized()).thenReturn(false); + Assert.assertThrows(InvalidParameterValueException.class, () -> sharedFSServiceImpl.allocSharedFS(cmd)); + + when(diskOfferingVO.isCustomized()).thenReturn(true); + when(diskOfferingVO.isCustomizedIops()).thenReturn(false); + Assert.assertThrows(InvalidParameterValueException.class, () -> sharedFSServiceImpl.allocSharedFS(cmd)); + } + + @Test + public void testAllocSharedFSInvalidFsFormat() { + CreateSharedFSCmd cmd = getMockCreateSharedFSCmd(); + + DataCenterVO zone = mock(DataCenterVO.class); + 
when(dataCenterDao.findById(s_zoneId)).thenReturn(zone); + when(zone.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + DiskOfferingVO diskOfferingVO = mock(DiskOfferingVO.class); + when(diskOfferingDao.findById(s_diskOfferingId)).thenReturn(diskOfferingVO); + when(diskOfferingVO.isCustomized()).thenReturn(true); + when(diskOfferingVO.isCustomizedIops()).thenReturn(true); + + when(cmd.getNetworkId()).thenReturn(s_networkId); + NetworkVO networkVO = mock(NetworkVO.class); + when(networkVO.getGuestType()).thenReturn(Network.GuestType.Isolated); + when(networkDao.findById(s_networkId)).thenReturn(networkVO); + + when(cmd.getFsFormat()).thenReturn("ext2"); + Assert.assertThrows(InvalidParameterValueException.class, () -> sharedFSServiceImpl.allocSharedFS(cmd)); + } + + @Test + public void testStartSharedFS() throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, OperationTimedoutException, NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "id", s_sharedFSId); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Stopped); + + Assert.assertEquals(sharedFSServiceImpl.startSharedFS(s_sharedFSId), sharedFS); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.StartRequested, null, sharedFSDao); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationSucceeded, null, sharedFSDao); + } + + @Test + public void testStartSharedFSException() throws ResourceUnavailableException, InsufficientCapacityException, OperationTimedoutException, NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Stopped); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + doThrow(CloudRuntimeException.class).when(lifeCycle).startSharedFS(sharedFS); + + 
Assert.assertThrows(CloudRuntimeException.class, () -> sharedFSServiceImpl.startSharedFS(s_sharedFSId)); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.StartRequested, null, sharedFSDao); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationFailed, null, sharedFSDao); + } + + @Test(expected = InvalidParameterValueException.class) + public void testStartSharedFSInvalidState() throws ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, OperationTimedoutException { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Ready); + sharedFSServiceImpl.startSharedFS(s_sharedFSId); + } + + @Test + public void testStopSharedFS() throws NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Ready); + Assert.assertEquals(sharedFSServiceImpl.stopSharedFS(s_sharedFSId, false), sharedFS); + verify(lifeCycle, Mockito.times(1)).stopSharedFS(any(), any()); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.StopRequested, null, sharedFSDao); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationSucceeded, null, sharedFSDao); + } + + @Test + public void testStopSharedFSException() throws NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Ready); + doThrow(CloudRuntimeException.class).when(lifeCycle).stopSharedFS(sharedFS, false); + + Assert.assertThrows(CloudRuntimeException.class, () -> sharedFSServiceImpl.stopSharedFS(s_sharedFSId, false)); + verify(lifeCycle, Mockito.times(1)).stopSharedFS(any(), any()); + verify(_stateMachine, times(1)).transitTo(sharedFS, 
SharedFS.Event.StopRequested, null, sharedFSDao); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationFailed, null, sharedFSDao); + } + + @Test(expected = InvalidParameterValueException.class) + public void testStopSharedFSInvalidState() { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Stopped); + sharedFSServiceImpl.stopSharedFS(s_sharedFSId, false); + } + + @Test + public void testRestartSharedFSWithoutCleanup() throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "id", s_sharedFSId); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Stopped); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + sharedFSServiceImpl.restartSharedFS(s_sharedFSId, false); + verify(lifeCycle, never()).stopSharedFS(any(), any()); + verify(lifeCycle, Mockito.times(1)).startSharedFS(any()); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.StartRequested, null, sharedFSDao); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationSucceeded, null, sharedFSDao); + } + + @Test + public void testRestartSharedFSWithCleanup() throws OperationTimedoutException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException, NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "id", s_sharedFSId); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Ready); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + + DataCenterVO zone = mock(DataCenterVO.class); + + when(lifeCycle.reDeploySharedFS(sharedFS)).thenReturn(true); + sharedFSServiceImpl.restartSharedFS(s_sharedFSId, true); + 
verify(lifeCycle, never()).stopSharedFS(any(), any()); + } + + @Test + public void testUpdateSharedFS() { + String newName = "New SharedFS"; + String newDescription = "New SharedFS Description"; + UpdateSharedFSCmd cmd = mock(UpdateSharedFSCmd.class); + when(cmd.getId()).thenReturn(s_sharedFSId); + when(cmd.getName()).thenReturn(newName); + when(cmd.getDescription()).thenReturn(newDescription); + + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + + sharedFSServiceImpl.updateSharedFS(cmd); + Assert.assertEquals(sharedFS.getName(), newName); + Assert.assertEquals(sharedFS.getDescription(), newDescription); + } + + @Test + public void testChangeSharedFSDiskOffering() throws ResourceAllocationException { + Long newSize = 200L; + Long newMinIops = 2000L; + Long newMaxIops = 4000L; + Long newDiskOfferingId = 10L; + ChangeSharedFSDiskOfferingCmd cmd = mock(ChangeSharedFSDiskOfferingCmd.class); + when(cmd.getId()).thenReturn(s_sharedFSId); + when(cmd.getDiskOfferingId()).thenReturn(newDiskOfferingId); + when(cmd.getSize()).thenReturn(newSize); + when(cmd.getMinIops()).thenReturn(newMinIops); + when(cmd.getMaxIops()).thenReturn(newMaxIops); + + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Ready); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + + DataCenterVO zone = mock(DataCenterVO.class); + when(dataCenterDao.findById(s_zoneId)).thenReturn(zone); + when(zone.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + DiskOfferingVO diskOfferingVO = mock(DiskOfferingVO.class); + when(diskOfferingDao.findById(newDiskOfferingId)).thenReturn(diskOfferingVO); + when(diskOfferingVO.isCustomized()).thenReturn(true); + when(diskOfferingVO.isCustomizedIops()).thenReturn(true); + + sharedFSServiceImpl.changeSharedFSDiskOffering(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void 
testChangeSharedFSDiskOfferingInvalidState() throws ResourceAllocationException { + ChangeSharedFSDiskOfferingCmd cmd = mock(ChangeSharedFSDiskOfferingCmd.class); + when(cmd.getId()).thenReturn(s_sharedFSId); + + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Destroyed); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + sharedFSServiceImpl.changeSharedFSDiskOffering(cmd); + } + + @Test + public void testChangeSharedFSServiceOffering() throws ResourceUnavailableException, InsufficientCapacityException, ManagementServerException, OperationTimedoutException, NoTransitionException, VirtualMachineMigrationException { + ChangeSharedFSServiceOfferingCmd cmd = mock(ChangeSharedFSServiceOfferingCmd.class); + Long newServiceOfferingId = 100L; + when(cmd.getServiceOfferingId()).thenReturn(newServiceOfferingId); + when(cmd.getId()).thenReturn(s_sharedFSId); + + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "id", s_sharedFSId); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Stopped); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + + DataCenterVO zone = mock(DataCenterVO.class); + when(dataCenterDao.findById(s_zoneId)).thenReturn(zone); + when(zone.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + when(lifeCycle.changeSharedFSServiceOffering(sharedFS, newServiceOfferingId)).thenReturn(true); + + sharedFSServiceImpl.changeSharedFSServiceOffering(cmd); + Assert.assertEquals(Optional.ofNullable(sharedFS.getServiceOfferingId()), Optional.ofNullable(newServiceOfferingId)); + } + + @Test(expected = InvalidParameterValueException.class) + public void testChangeSharedFSServiceOfferingInvalidState() throws ResourceUnavailableException, InsufficientCapacityException, ManagementServerException, OperationTimedoutException, VirtualMachineMigrationException { + ChangeSharedFSServiceOfferingCmd cmd = 
mock(ChangeSharedFSServiceOfferingCmd.class); + Long newServiceOfferingId = 100L; + when(cmd.getId()).thenReturn(s_sharedFSId); + + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "id", s_sharedFSId); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Starting); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + + sharedFSServiceImpl.changeSharedFSServiceOffering(cmd); + } + + @Test + public void testDestroySharedFS() throws NoTransitionException { + DestroySharedFSCmd cmd = mock(DestroySharedFSCmd.class); + when(cmd.getId()).thenReturn(s_sharedFSId); + when(cmd.isExpunge()).thenReturn(false); + + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Stopped); + + Assert.assertEquals(sharedFSServiceImpl.destroySharedFS(cmd), true); + verify(lifeCycle, never()).deleteSharedFS(any()); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.DestroyRequested, null, sharedFSDao); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDestroySharedFSInvalidState() { + DestroySharedFSCmd cmd = mock(DestroySharedFSCmd.class); + when(cmd.getId()).thenReturn(s_sharedFSId); + when(cmd.isExpunge()).thenReturn(false); + when(cmd.isForced()).thenReturn(false); + + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Ready); + + sharedFSServiceImpl.destroySharedFS(cmd); + } + + @Test + public void testRecoverSharedFS() throws NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Destroyed); + Assert.assertEquals(sharedFSServiceImpl.recoverSharedFS(s_sharedFSId), sharedFS); + verify(_stateMachine, 
times(1)).transitTo(sharedFS, SharedFS.Event.RecoveryRequested, null, sharedFSDao); + } + + @Test(expected = InvalidParameterValueException.class) + public void testRecoverSharedFSInvalidState() { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Expunged); + sharedFSServiceImpl.recoverSharedFS(s_sharedFSId); + } + + @Test + public void testDeleteSharedFS() throws NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Destroyed); + sharedFSServiceImpl.deleteSharedFS(s_sharedFSId); + verify(lifeCycle, Mockito.times(1)).deleteSharedFS(any()); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.ExpungeOperation, null, sharedFSDao); + } + + @Test (expected = CloudRuntimeException.class) + public void testDeleteSharedFSTransitionException() throws NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Destroyed); + when(_stateMachine.transitTo(sharedFS, SharedFS.Event.ExpungeOperation, null, sharedFSDao)).thenThrow(new NoTransitionException("")); + sharedFSServiceImpl.deleteSharedFS(s_sharedFSId); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSharedFSInvalidState() { + SharedFSVO sharedFS = getMockSharedFS(); + when(sharedFSDao.findById(s_sharedFSId)).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Stopped); + sharedFSServiceImpl.deleteSharedFS(s_sharedFSId); + } + + private ListSharedFSCmd getMockListSharedFSCmd() { + ListSharedFSCmd cmd = mock(ListSharedFSCmd.class); + when(cmd.getId()).thenReturn(s_sharedFSId); + when(cmd.getName()).thenReturn(s_name); + 
when(cmd.getZoneId()).thenReturn(s_zoneId); + when(cmd.getDiskOfferingId()).thenReturn(s_diskOfferingId); + when(cmd.getServiceOfferingId()).thenReturn(s_serviceOfferingId); + when(cmd.getAccountName()).thenReturn("account"); + when(cmd.getDomainId()).thenReturn(s_domainId); + when(cmd.getNetworkId()).thenReturn(s_networkId); + return cmd; + } + + @Test + public void testSearchForSharedFS() { + SearchBuilder sb = mock(SearchBuilder.class); + when(sharedFSDao.createSearchBuilder()).thenReturn(sb); + + SharedFSVO sharedFS = getMockSharedFS(); + when(sb.entity()).thenReturn(sharedFS); + ReflectionTestUtils.setField(sharedFS, "id", s_sharedFSId); + + VolumeVO volume = mock(VolumeVO.class); + SearchBuilder volumeSb = mock(SearchBuilder.class); + when(volumeSb.entity()).thenReturn(volume); + when(volumeDao.createSearchBuilder()).thenReturn(volumeSb); + + NicVO nic = mock(NicVO.class); + SearchBuilder nicSb = mock(SearchBuilder.class); + when(nicSb.entity()).thenReturn(nic); + when(nicDao.createSearchBuilder()).thenReturn(nicSb); + + SearchCriteria sc = mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + + Pair, Integer> result = new Pair<>(List.of(sharedFS), 1); + when(sharedFSDao.searchAndCount(any(), any())).thenReturn(result); + SharedFSJoinVO sharedFSJoinVO = mock(SharedFSJoinVO.class); + when(sharedFSJoinDao.searchByIds(List.of(s_sharedFSId).toArray(new Long[0]))).thenReturn(List.of(sharedFSJoinVO)); + + when(owner.getId()).thenReturn(s_ownerId); + when(accountMgr.isRootAdmin(any())).thenReturn(true); + when(sharedFSJoinDao.createSharedFSResponses(any(), any())).thenReturn(null); + + ListSharedFSCmd cmd = getMockListSharedFSCmd(); + sharedFSServiceImpl.searchForSharedFS(ResponseObject.ResponseView.Restricted, cmd); + + verify(sc, times(1)).setParameters("id", s_sharedFSId); + verify(sc, times(1)).setParameters("name", s_name); + verify(sc, times(1)).setParameters("dataCenterId", s_zoneId); + verify(sc, 
times(1)).setParameters("serviceOfferingId", s_serviceOfferingId); + verify(sc, times(1)).setJoinParameters("volSearch", "diskOfferingId", s_diskOfferingId); + verify(sc, times(1)).setJoinParameters("nicSearch", "networkId", s_networkId); + verify(sharedFSDao, times(1)).searchAndCount(any(), any()); + } + + @Test + public void testCleanupSharedFS() throws NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Destroyed); + when(sharedFSDao.listSharedFSToBeDestroyed(any(Date.class))).thenReturn(List.of(sharedFS)); + try (MockedStatic globalLockMocked = Mockito.mockStatic(GlobalLock.class)) { + GlobalLock scanlock = mock(GlobalLock.class); + when(GlobalLock.getInternLock("sharedfsservice.cleanup")).thenReturn(scanlock); + when(scanlock.lock(30)).thenReturn(true); + sharedFSServiceImpl.cleanupSharedFS(true); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.ExpungeOperation, null, sharedFSDao); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationFailed, null, sharedFSDao); + } + } + + @Test + public void testCleanupSharedFSInvalidState() throws NoTransitionException { + SharedFSVO sharedFS = getMockSharedFS(); + ReflectionTestUtils.setField(sharedFS, "state", SharedFS.State.Stopped); + when(sharedFSDao.listSharedFSToBeDestroyed(any(Date.class))).thenReturn(List.of(sharedFS)); + try (MockedStatic globalLockMocked = Mockito.mockStatic(GlobalLock.class)) { + GlobalLock scanlock = mock(GlobalLock.class); + when(GlobalLock.getInternLock("sharedfsservice.cleanup")).thenReturn(scanlock); + when(scanlock.lock(30)).thenReturn(true); + sharedFSServiceImpl.cleanupSharedFS(true); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.ExpungeOperation, null, sharedFSDao); + verify(_stateMachine, times(1)).transitTo(sharedFS, SharedFS.Event.OperationFailed, null, sharedFSDao); + } + } +} diff --git 
a/server/src/test/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDaoImplTest.java b/server/src/test/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDaoImplTest.java new file mode 100644 index 00000000000..74d3ce854cc --- /dev/null +++ b/server/src/test/java/org/apache/cloudstack/storage/sharedfs/query/dao/SharedFSJoinDaoImplTest.java @@ -0,0 +1,152 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.sharedfs.query.dao; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; + +import org.apache.cloudstack.api.ResponseObject; +import org.apache.cloudstack.api.response.SharedFSResponse; +import org.apache.cloudstack.storage.sharedfs.SharedFS; +import org.apache.cloudstack.storage.sharedfs.SharedFSVO; +import org.apache.cloudstack.storage.sharedfs.query.vo.SharedFSJoinVO; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.api.ApiDBUtils; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeStats; +import com.cloud.user.VmDiskStatisticsVO; +import com.cloud.user.dao.VmDiskStatisticsDao; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.NicVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.NicDao; + +@RunWith(MockitoJUnitRunner.class) +public class SharedFSJoinDaoImplTest { + @Mock + NicDao nicDao; + + @Mock + NetworkDao networkDao; + + @Mock + private VmDiskStatisticsDao vmDiskStatsDao; + + @Spy + @InjectMocks + SharedFSJoinDaoImpl sharedFSJoinDao; + + private AutoCloseable closeable; + + @Before + public void setUp() throws Exception { + closeable = MockitoAnnotations.openMocks(this); + } + + @After + public void tearDown() throws Exception { + closeable.close(); + } + + @Test + public void testNewSharedFSView() { + SharedFSVO sharedfs = mock(SharedFSVO.class); + Long id = 1L; + when(sharedfs.getId()).thenReturn(id); 
+ SharedFSJoinVO sharedFSJoinVO = mock(SharedFSJoinVO.class); + + SearchBuilder sb = Mockito.mock(SearchBuilder.class); + ReflectionTestUtils.setField(sharedFSJoinDao, "fsSearch", sb); + SearchCriteria sc = Mockito.mock(SearchCriteria.class); + Mockito.when(sb.create()).thenReturn(sc); + Mockito.doReturn(List.of(sharedFSJoinVO)).when(sharedFSJoinDao).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.eq(null), Mockito.eq(null), + Mockito.eq(false)); + + sharedFSJoinDao.newSharedFSView(sharedfs); + + Mockito.verify(sc).setParameters("id", id); + Mockito.verify(sharedFSJoinDao, Mockito.times(1)).searchIncludingRemoved( + Mockito.any(SearchCriteria.class), Mockito.eq(null), Mockito.eq(null), + Mockito.eq(false)); + } + + @Test + public void newSharedFSResponse() { + Long s_ownerId = 1L; + Long s_zoneId = 2L; + Long s_volumeId = 3L; + Long s_vmId = 4L; + Long s_networkId = 5L; + String s_fsFormat = "EXT4"; + SharedFS.State state = SharedFS.State.Ready; + VirtualMachine.State vmState = VirtualMachine.State.Running; + Storage.ProvisioningType provisioningType = Storage.ProvisioningType.THIN; + + SharedFSJoinVO sharedFSJoinVO = mock(SharedFSJoinVO.class); + when(sharedFSJoinVO.getAccountId()).thenReturn(s_ownerId); + when(sharedFSJoinVO.getZoneId()).thenReturn(s_zoneId); + when(sharedFSJoinVO.getVolumeId()).thenReturn(s_volumeId); + when(sharedFSJoinVO.getInstanceId()).thenReturn(s_vmId); + when(sharedFSJoinVO.getState()).thenReturn(state); + when(sharedFSJoinVO.getFsType()).thenReturn(SharedFS.FileSystemType.valueOf(s_fsFormat)); + when(sharedFSJoinVO.getInstanceState()).thenReturn(vmState); + when(sharedFSJoinVO.getProvisioningType()).thenReturn(provisioningType); + + NicVO nic = mock(NicVO.class); + NetworkVO network = mock(NetworkVO.class); + when(nic.getNetworkId()).thenReturn(s_networkId); + when(nicDao.listByVmId(s_vmId)).thenReturn(List.of(nic)); + when(networkDao.findById(s_networkId)).thenReturn(network); + + VmDiskStatisticsVO diskStats = 
mock(VmDiskStatisticsVO.class); + when(vmDiskStatsDao.findBy(s_ownerId, s_zoneId, s_vmId, s_volumeId)).thenReturn(diskStats); + + VolumeStats vs = mock(VolumeStats.class); + String path = "volumepath"; + when(sharedFSJoinVO.getVolumeFormat()).thenReturn(Storage.ImageFormat.QCOW2); + when(sharedFSJoinVO.getVolumePath()).thenReturn(path); + + try (MockedStatic apiDBUtilsMocked = Mockito.mockStatic(ApiDBUtils.class)) { + when(ApiDBUtils.getVolumeStatistics(path)).thenReturn(vs); + SharedFSResponse response = sharedFSJoinDao.newSharedFSResponse(ResponseObject.ResponseView.Restricted, sharedFSJoinVO); + Assert.assertEquals(ReflectionTestUtils.getField(response, "state"), state.toString()); + Assert.assertEquals(ReflectionTestUtils.getField(response, "virtualMachineState"), vmState.toString()); + Assert.assertEquals(ReflectionTestUtils.getField(response, "provisioningType"), provisioningType.toString()); + } + + } +} diff --git a/server/src/test/java/org/apache/cloudstack/user/UserPasswordResetManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/user/UserPasswordResetManagerImplTest.java new file mode 100644 index 00000000000..17092e6311d --- /dev/null +++ b/server/src/test/java/org/apache/cloudstack/user/UserPasswordResetManagerImplTest.java @@ -0,0 +1,150 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.user; + +import com.cloud.user.UserAccount; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.resourcedetail.UserDetailVO; +import org.apache.cloudstack.resourcedetail.dao.UserDetailsDao; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Collections; +import java.util.Map; + +import static org.apache.cloudstack.resourcedetail.UserDetailVO.PasswordResetToken; +import static org.apache.cloudstack.resourcedetail.UserDetailVO.PasswordResetTokenExpiryDate; + +@RunWith(MockitoJUnitRunner.class) +public class UserPasswordResetManagerImplTest { + @Spy + @InjectMocks + UserPasswordResetManagerImpl passwordReset; + + @Mock + private UserDetailsDao userDetailsDao; + + @Test + public void testGetMessageBody() { + ConfigKey passwordResetMailTemplate = Mockito.mock(ConfigKey.class); + UserPasswordResetManagerImpl.PasswordResetMailTemplate = passwordResetMailTemplate; + Mockito.when(passwordResetMailTemplate.value()).thenReturn("Hello {{username}}!\n" + + "You have requested to reset your password. Please click the following link to reset your password:\n" + + "{{{resetLink}}}\n" + + "If you did not request a password reset, please ignore this email.\n" + + "\n" + + "Regards,\n" + + "The CloudStack Team"); + + UserAccount userAccount = Mockito.mock(UserAccount.class); + Mockito.when(userAccount.getUsername()).thenReturn("test_user"); + + String messageBody = passwordReset.getMessageBody(userAccount, "reset_token", "reset_link"); + String expectedMessageBody = "Hello test_user!\n" + + "You have requested to reset your password. 
Please click the following link to reset your password:\n" + + "reset_link\n" + + "If you did not request a password reset, please ignore this email.\n" + + "\n" + + "Regards,\n" + + "The CloudStack Team"; + Assert.assertEquals("Message body doesn't match", expectedMessageBody, messageBody); + } + + @Test + public void testValidateAndResetPassword() { + UserAccount userAccount = Mockito.mock(UserAccount.class); + Mockito.when(userAccount.getId()).thenReturn(1L); + Mockito.when(userAccount.getUsername()).thenReturn("test_user"); + + Mockito.doNothing().when(passwordReset).resetPassword(userAccount, "new_password"); + + UserDetailVO resetTokenDetail = Mockito.mock(UserDetailVO.class); + UserDetailVO resetTokenExpiryDate = Mockito.mock(UserDetailVO.class); + Mockito.when(userDetailsDao.findDetail(1L, PasswordResetToken)).thenReturn(resetTokenDetail); + Mockito.when(userDetailsDao.findDetail(1L, PasswordResetTokenExpiryDate)).thenReturn(resetTokenExpiryDate); + Mockito.when(resetTokenExpiryDate.getValue()).thenReturn(String.valueOf(System.currentTimeMillis() - 5 * 60 * 1000)); + + try { + passwordReset.validateAndResetPassword(userAccount, "reset_token", "new_password"); + Assert.fail("Should have thrown exception"); + } catch (ServerApiException e) { + Assert.assertEquals("No reset token found for user test_user", e.getMessage()); + } + + Mockito.when(resetTokenDetail.getValue()).thenReturn("reset_token_XXX"); + + try { + passwordReset.validateAndResetPassword(userAccount, "reset_token", "new_password"); + Assert.fail("Should have thrown exception"); + } catch (ServerApiException e) { + Assert.assertEquals("Invalid reset token for user test_user", e.getMessage()); + } + + Mockito.when(resetTokenDetail.getValue()).thenReturn("reset_token"); + + try { + passwordReset.validateAndResetPassword(userAccount, "reset_token", "new_password"); + Assert.fail("Should have thrown exception"); + } catch (ServerApiException e) { + Assert.assertEquals("Reset token has expired for 
user test_user", e.getMessage()); + } + + Mockito.when(resetTokenExpiryDate.getValue()).thenReturn(String.valueOf(System.currentTimeMillis() + 5 * 60 * 1000)); + + Assert.assertTrue(passwordReset.validateAndResetPassword(userAccount, "reset_token", "new_password")); + Mockito.verify(passwordReset, Mockito.times(1)).resetPassword(userAccount, "new_password"); + } + + @Test + public void testValidateExistingTokenFirstRequest() { + UserAccount userAccount = Mockito.mock(UserAccount.class); + Mockito.when(userAccount.getId()).thenReturn(1L); + Mockito.when(userDetailsDao.listDetailsKeyPairs(1L)).thenReturn(Collections.emptyMap()); + + Assert.assertTrue(passwordReset.validateExistingToken(userAccount)); + } + + @Test + public void testValidateExistingTokenSecondRequestExpired() { + UserAccount userAccount = Mockito.mock(UserAccount.class); + Mockito.when(userAccount.getId()).thenReturn(1L); + Mockito.when(userDetailsDao.listDetailsKeyPairs(1L)).thenReturn(Map.of( + PasswordResetToken, "reset_token", + PasswordResetTokenExpiryDate, String.valueOf(System.currentTimeMillis() - 5 * 60 * 1000))); + + Assert.assertTrue(passwordReset.validateExistingToken(userAccount)); + } + + + @Test + public void testValidateExistingTokenSecondRequestUnexpired() { + UserAccount userAccount = Mockito.mock(UserAccount.class); + Mockito.when(userAccount.getId()).thenReturn(1L); + Mockito.when(userDetailsDao.listDetailsKeyPairs(1L)).thenReturn(Map.of( + PasswordResetToken, "reset_token", + PasswordResetTokenExpiryDate, String.valueOf(System.currentTimeMillis() + 5 * 60 * 1000))); + + Assert.assertFalse(passwordReset.validateExistingToken(userAccount)); + } +} diff --git a/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java index 81358d99ae7..f3ba600817f 100644 --- a/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java +++ 
b/server/src/test/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImplTest.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.vm; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; @@ -70,6 +71,8 @@ import org.mockito.junit.MockitoJUnitRunner; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CheckConvertInstanceAnswer; +import com.cloud.agent.api.CheckConvertInstanceCommand; import com.cloud.agent.api.CheckVolumeAnswer; import com.cloud.agent.api.CheckVolumeCommand; import com.cloud.agent.api.ConvertInstanceAnswer; @@ -587,6 +590,7 @@ public class UnmanagedVMsManagerImplTest { String host = "192.168.1.10"; String vmName = "TestInstanceFromVmware"; instance.setName(vmName); + String tmplFileName = "5b8d689a-e61a-4ac3-9b76-e121ff90fbd3"; long newVmId = 2L; long networkId = 1L; when(vmDao.getNextInSequence(Long.class, "id")).thenReturn(newVmId); @@ -615,8 +619,10 @@ public class UnmanagedVMsManagerImplTest { HypervisorGuru vmwareGuru = mock(HypervisorGuru.class); when(hypervisorGuruManager.getGuru(Hypervisor.HypervisorType.VMware)).thenReturn(vmwareGuru); - when(vmwareGuru.cloneHypervisorVMOutOfBand(anyString(), anyString(), anyMap())).thenReturn(instance); + when(vmwareGuru.getHypervisorVMOutOfBandAndCloneIfRequired(anyString(), anyString(), anyMap())).thenReturn(new Pair<>(instance, true)); when(vmwareGuru.removeClonedHypervisorVMOutOfBand(anyString(), anyString(), anyMap())).thenReturn(true); + when(vmwareGuru.createVMTemplateOutOfBand(anyString(), anyString(), anyMap(), any(DataStoreTO.class), anyInt())).thenReturn(tmplFileName); + when(vmwareGuru.removeVMTemplateOutOfBand(any(DataStoreTO.class), anyString())).thenReturn(true); HostVO convertHost = 
mock(HostVO.class); long convertHostId = 1L; @@ -640,6 +646,7 @@ public class UnmanagedVMsManagerImplTest { when(destPool.getDataCenterId()).thenReturn(zoneId); when(destPool.getClusterId()).thenReturn(null); when(destPool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); + StoragePoolVO zoneDestPool = mock(StoragePoolVO.class); if (selectTemporaryStorage) { long temporaryStoragePoolId = 1L; when(importVmCmd.getConvertStoragePoolId()).thenReturn(temporaryStoragePoolId); @@ -651,8 +658,9 @@ public class UnmanagedVMsManagerImplTest { when(imageStoreDao.findOneByZoneAndProtocol(zoneId, "nfs")).thenReturn(imageStoreVO); when(dataStoreManager.getDataStore(1L, DataStoreRole.Image)).thenReturn(dataStore); } - when(primaryDataStoreDao.listPoolsByCluster(clusterId)).thenReturn(List.of(destPool)); when(primaryDataStoreDao.listPoolByHostPath(Mockito.anyString(), Mockito.anyString())).thenReturn(List.of(destPool)); + when(primaryDataStoreDao.findClusterWideStoragePoolsByHypervisorAndPoolType(clusterId, Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem)).thenReturn(List.of(destPool)); + when(primaryDataStoreDao.findZoneWideStoragePoolsByHypervisorAndPoolType(zoneId, Hypervisor.HypervisorType.KVM, Storage.StoragePoolType.NetworkFilesystem)).thenReturn(List.of(zoneDestPool)); if (VcenterParameter.EXISTING == vcenterParameter) { VmwareDatacenterVO datacenterVO = mock(VmwareDatacenterVO.class); @@ -680,17 +688,25 @@ public class UnmanagedVMsManagerImplTest { when(vmwareDatacenterDao.findById(existingDatacenterId)).thenReturn(null); } - ConvertInstanceAnswer answer = mock(ConvertInstanceAnswer.class); - when(answer.getResult()).thenReturn(vcenterParameter != VcenterParameter.CONVERT_FAILURE); - when(answer.getConvertedInstance()).thenReturn(instance); + CheckConvertInstanceAnswer checkConvertInstanceAnswer = mock(CheckConvertInstanceAnswer.class); + when(checkConvertInstanceAnswer.getResult()).thenReturn(vcenterParameter != 
VcenterParameter.CONVERT_FAILURE); if (VcenterParameter.AGENT_UNAVAILABLE != vcenterParameter) { - when(agentManager.send(Mockito.eq(convertHostId), any(ConvertInstanceCommand.class))).thenReturn(answer); + when(agentManager.send(Mockito.eq(convertHostId), Mockito.any(CheckConvertInstanceCommand.class))).thenReturn(checkConvertInstanceAnswer); + } + + ConvertInstanceAnswer convertInstanceAnswer = mock(ConvertInstanceAnswer.class); + when(convertInstanceAnswer.getResult()).thenReturn(vcenterParameter != VcenterParameter.CONVERT_FAILURE); + when(convertInstanceAnswer.getConvertedInstance()).thenReturn(instance); + if (VcenterParameter.AGENT_UNAVAILABLE != vcenterParameter) { + when(agentManager.send(Mockito.eq(convertHostId), Mockito.any(ConvertInstanceCommand.class))).thenReturn(convertInstanceAnswer); } try (MockedStatic ignored = Mockito.mockStatic(UsageEventUtils.class)) { unmanagedVMsManager.importVm(importVmCmd); - verify(vmwareGuru).cloneHypervisorVMOutOfBand(Mockito.eq(host), Mockito.eq(vmName), anyMap()); + verify(vmwareGuru).getHypervisorVMOutOfBandAndCloneIfRequired(Mockito.eq(host), Mockito.eq(vmName), anyMap()); + verify(vmwareGuru).createVMTemplateOutOfBand(Mockito.eq(host), Mockito.eq(vmName), anyMap(), any(DataStoreTO.class), anyInt()); verify(vmwareGuru).removeClonedHypervisorVMOutOfBand(Mockito.eq(host), Mockito.eq(vmName), anyMap()); + verify(vmwareGuru).removeVMTemplateOutOfBand(any(DataStoreTO.class), anyString()); } } @@ -798,7 +814,7 @@ public class UnmanagedVMsManagerImplTest { long poolId = 1L; when(primaryDataStoreDao.findById(poolId)).thenReturn(null); - unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, poolId, null); + unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, poolId); } @Test(expected = CloudRuntimeException.class) @@ -809,7 +825,7 @@ public class UnmanagedVMsManagerImplTest { Mockito.when(pool.getScope()).thenReturn(ScopeType.CLUSTER); Mockito.when(pool.getClusterId()).thenReturn(100L); 
when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); - unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, poolId, null); + unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, poolId); } @Test(expected = CloudRuntimeException.class) @@ -819,9 +835,7 @@ public class UnmanagedVMsManagerImplTest { StoragePoolVO pool = mock(StoragePoolVO.class); Mockito.when(pool.getScope()).thenReturn(ScopeType.HOST); when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); - HostVO convertHost = Mockito.mock(HostVO.class); - Mockito.when(convertHost.getId()).thenReturn(1L); - unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, poolId, convertHost); + unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, poolId); } @Test(expected = CloudRuntimeException.class) @@ -832,16 +846,15 @@ public class UnmanagedVMsManagerImplTest { Mockito.when(pool.getScope()).thenReturn(ScopeType.CLUSTER); Mockito.when(pool.getClusterId()).thenReturn(1L); when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); - HostVO convertHost = Mockito.mock(HostVO.class); Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.RBD); - unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, poolId, convertHost); + unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, poolId); } @Test(expected = CloudRuntimeException.class) public void testSelectInstanceConversionTemporaryLocationNoPoolAvailable() { ClusterVO cluster = getClusterForTests(); Mockito.when(imageStoreDao.findOneByZoneAndProtocol(anyLong(), anyString())).thenReturn(null); - unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, null, null); + unmanagedVMsManager.selectInstanceConversionTemporaryLocation(cluster, null); } @Test diff --git a/server/src/test/resources/StoragePoolDaoTestContext.xml b/server/src/test/resources/StoragePoolDaoTestContext.xml index 979963fadab..49d10c713e8 100644 --- 
a/server/src/test/resources/StoragePoolDaoTestContext.xml +++ b/server/src/test/resources/StoragePoolDaoTestContext.xml @@ -36,7 +36,7 @@
    - diff --git a/server/src/test/resources/VpcApiUnitTestContext.xml b/server/src/test/resources/VpcApiUnitTestContext.xml index f09c16bf46c..f3f7816d4dd 100644 --- a/server/src/test/resources/VpcApiUnitTestContext.xml +++ b/server/src/test/resources/VpcApiUnitTestContext.xml @@ -1,19 +1,19 @@ - - + diff --git a/server/src/test/resources/VpcTestContext.xml b/server/src/test/resources/VpcTestContext.xml index c9575734ba4..ae60cfabd9d 100644 --- a/server/src/test/resources/VpcTestContext.xml +++ b/server/src/test/resources/VpcTestContext.xml @@ -1,12 +1,12 @@ - - + @@ -70,7 +70,7 @@ - + diff --git a/server/src/test/resources/appLoadBalancer.xml b/server/src/test/resources/appLoadBalancer.xml index 403aff65c86..6ae64d4b601 100644 --- a/server/src/test/resources/appLoadBalancer.xml +++ b/server/src/test/resources/appLoadBalancer.xml @@ -1,19 +1,19 @@ - @@ -33,11 +33,11 @@ -
    +
    - + - + diff --git a/server/src/test/resources/testContext.xml b/server/src/test/resources/testContext.xml index c267648da05..7ea4d4247b1 100644 --- a/server/src/test/resources/testContext.xml +++ b/server/src/test/resources/testContext.xml @@ -17,13 +17,13 @@ under the License. --> - + @@ -49,7 +49,7 @@ - + @@ -91,12 +91,12 @@ under the License. 1 NM - 1 + 1 10.91.28.1 10.91.28.0/24 10.91.28.160-10.91.28.179 - --> + --> - - @@ -217,12 +217,12 @@ under the License. admin@mailprovider.com - + - + + --> instance.name TEST + --> integration.api.port 8096 @@ -272,7 +272,7 @@ under the License. The memory.capacity.threshold is a percentage value (e.g. 0.85 is 85%). Whenever the Percent Used memory in a pod exceeds this threshold, our software will alert you. - --> + --> memory.capacity.threshold 0.85 @@ -349,7 +349,7 @@ under the License. either a load balancer if clustering is used, or the management server if a single server is installed. If the port to use is 80, the ":8080" portion of the value below can be removed. - + In the vast majority of cases, all you need to change is the host, from example.com to whatever IP address or host name of your management server / load balancer. --> @@ -360,11 +360,11 @@ under the License. @@ -415,7 +415,7 @@ under the License. 
false - %s" % command) @@ -3468,7 +3552,7 @@ class TestAccountBasedIngressRules(cloudstackTestCase): self.debug("Getting SSH client of virtual machine 1: %s" % self.virtual_machine_1.id) try: - sshClient = self.virtual_machine_1.get_ssh_client(ipaddress=self.virtual_machine_1.nic[0].ipaddress) + sshClient = self.virtual_machine_1.get_ssh_client(ipaddress=self.virtual_machine_1.nic[0].ipaddress, retries=5) self.debug("SSHing into vm_2 %s from vm_1 %s" % (self.virtual_machine_2.nic[0].ipaddress,self.virtual_machine_1.nic[0].ipaddress)) command = "ssh -t -t root@%s" % self.virtual_machine_2.nic[0].ipaddress self.debug("command: --> %s" % command) diff --git a/test/integration/component/test_assign_vm.py b/test/integration/component/test_assign_vm.py index c2ee442e32e..e01f6fbc686 100644 --- a/test/integration/component/test_assign_vm.py +++ b/test/integration/component/test_assign_vm.py @@ -18,9 +18,9 @@ """ """ #Import Local Modules -from nose.plugins.attrib import attr -from marvin.cloudstackTestCase import cloudstackTestCase -from marvin.lib.base import (Account, +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.lib.base import (Account, Domain, User, Project, diff --git a/test/integration/component/test_egress_fw_rules.py b/test/integration/component/test_egress_fw_rules.py index 7e045a6f845..780f9f2615d 100644 --- a/test/integration/component/test_egress_fw_rules.py +++ b/test/integration/component/test_egress_fw_rules.py @@ -18,10 +18,10 @@ """ """ #Import Local Modules -from nose.plugins.attrib import attr -from marvin.cloudstackTestCase import cloudstackTestCase +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase import unittest -from marvin.lib.base import (Account, +from marvin.lib.base import (Account, Domain, Router, Network, diff --git a/test/integration/component/test_interop_xd_ccp.py b/test/integration/component/test_interop_xd_ccp.py index 
ae5a9ae9ae7..dc579336590 100644 --- a/test/integration/component/test_interop_xd_ccp.py +++ b/test/integration/component/test_interop_xd_ccp.py @@ -571,7 +571,7 @@ class TestXDCCPInterop(cloudstackTestCase): id=self.volume.id ) - if list_volume_response1[0].virtualmachineid is None: + if list_volume_response1[0].virtualmachineid is None: self.skipTest("Check if volume is attached to the VM before detach") self.virtual_machine.detach_volume(self.user_api_client, self.volume) @@ -618,7 +618,7 @@ class TestXDCCPInterop(cloudstackTestCase): id=self.volume.id ) - if list_volume_response1[0].virtualmachineid is not None: + if list_volume_response1[0].virtualmachineid is not None: self.skipTest("Check if volume is detached before deleting") cmd = deleteVolume.deleteVolumeCmd() diff --git a/test/integration/component/test_ldap.py b/test/integration/component/test_ldap.py index 6c6179e292b..8a9fd4cf5e6 100644 --- a/test/integration/component/test_ldap.py +++ b/test/integration/component/test_ldap.py @@ -52,12 +52,7 @@ class TestLdap(cloudstackTestCase): @classmethod def tearDownClass(cls): - try: - cleanup_resources(cls.api_client, cls._cleanup) - - except Exception as tde: - raise Exception("Warning: Exception during cleanup : %s" % tde) - return + super(TestLdap, cls).tearDownClass() def setUp(self): diff --git a/test/integration/component/test_rootvolume_resize.py b/test/integration/component/test_rootvolume_resize.py index 7e58d1e3f42..fa2d4a018cd 100644 --- a/test/integration/component/test_rootvolume_resize.py +++ b/test/integration/component/test_rootvolume_resize.py @@ -647,7 +647,7 @@ class TestResizeVolume(cloudstackTestCase): self.assertEqual(virtualmachine_snapshot.id, virtulmachine_snapshot_list[0].id, "Virtual Machine Snapshot id do not match") - except Exception as e: + except Exception as e: raise Exception("Issue CLOUDSTACK-10080: Exception while performing" " vmsnapshot: %s" % e) else: diff --git a/test/integration/component/test_security_groups.py 
b/test/integration/component/test_security_groups.py index 1a268d692eb..449fe10cb28 100644 --- a/test/integration/component/test_security_groups.py +++ b/test/integration/component/test_security_groups.py @@ -28,7 +28,10 @@ from marvin.lib.base import (Account, SecurityGroup, Router, Host, - Network) + Network, + PhysicalNetwork, + TrafficType, + NetworkServiceProvider) from marvin.lib.common import (get_domain, get_zone, get_template, @@ -73,6 +76,26 @@ class TestDefaultSecurityGroup(cloudstackTestCase): # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client) cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) + if cls.zone.securitygroupsenabled is False: + physical_networks = PhysicalNetwork.list(cls.api_client, zoneid=cls.zone.id) + selected_physical_network = None + for net in physical_networks: + traffic_types = TrafficType.list(cls.api_client, physicalnetworkid=net.id) + for traffic_type in traffic_types: + if traffic_type.traffictype == 'Guest': + selected_physical_network = net + break + if selected_physical_network is not None: + break + if selected_physical_network is None: + raise Exception("No physical network found with guest traffic type") + + # Enable security group provider for physical network + nsps = NetworkServiceProvider.list(cls.api_client, physicalnetworkid=selected_physical_network.id, name='SecurityGroupProvider') + if len(nsps) == 0: + raise Exception("No security group provider found for physical network") + NetworkServiceProvider.update(cls.api_client, nsps[0].id, state='Enabled') + cls.testdata['mode'] = cls.zone.networktype template = get_template( @@ -115,7 +138,7 @@ class TestDefaultSecurityGroup(cloudstackTestCase): return - @attr(tags=["sg", "basic", "eip", "advancedsg"]) + @attr(tags=["sg", "basic", "eip", "advancedsg", "advanced"]) def test_01_deployVM_InDefaultSecurityGroup(self): """Test deploy VM in default security group """ @@ -193,7 +216,7 @@ class 
TestDefaultSecurityGroup(cloudstackTestCase): ) return - @attr(tags=["sg", "basic", "eip", "advancedsg"]) + @attr(tags=["sg", "basic", "eip", "advancedsg", "advanced"]) def test_02_listSecurityGroups(self): """Test list security groups for admin account """ @@ -228,7 +251,7 @@ class TestDefaultSecurityGroup(cloudstackTestCase): ) return - @attr(tags=["sg", "basic", "eip", "advancedsg"]) + @attr(tags=["sg", "basic", "eip", "advancedsg", "advanced"]) def test_03_accessInDefaultSecurityGroup(self): """Test access in default security group """ @@ -314,7 +337,8 @@ class TestDefaultSecurityGroup(cloudstackTestCase): self.virtual_machine.ssh_ip, self.virtual_machine.ssh_port, self.virtual_machine.username, - self.virtual_machine.password + self.virtual_machine.password, + retries=5 ) return @@ -453,7 +477,7 @@ class TestAuthorizeIngressRule(cloudstackTestCase): # Should be able to SSH VM try: self.debug("SSH into VM: %s" % self.virtual_machine.id) - self.virtual_machine.get_ssh_client() + self.virtual_machine.get_ssh_client(retries=5) except Exception as e: self.fail("SSH Access failed for %s: %s" % (self.virtual_machine.ipaddress, e) @@ -604,7 +628,7 @@ class TestRevokeIngressRule(cloudstackTestCase): # Should be able to SSH VM try: self.debug("SSH into VM: %s" % self.virtual_machine.id) - self.virtual_machine.get_ssh_client() + self.virtual_machine.get_ssh_client(retries=5) except Exception as e: self.fail("SSH Access failed for %s: %s" % (self.virtual_machine.ipaddress, e) @@ -937,7 +961,7 @@ class TestdeployVMWithUserData(cloudstackTestCase): % self.virtual_machine.ssh_ip ) - ssh = self.virtual_machine.get_ssh_client() + ssh = self.virtual_machine.get_ssh_client(retries=5) except Exception as e: self.fail("SSH Access failed for %s: %s" % (self.virtual_machine.ipaddress, e) @@ -1348,7 +1372,7 @@ class TestIngressRule(cloudstackTestCase): self.virtual_machine.ssh_ip, self.testdata["ingress_rule"]["endport"] )) - self.virtual_machine.get_ssh_client() + 
self.virtual_machine.get_ssh_client(retries=5) except Exception as e: self.fail("SSH access failed for ingress rule ID: %s, %s" @@ -1476,7 +1500,7 @@ class TestIngressRule(cloudstackTestCase): self.virtual_machine.ssh_ip, self.testdata["ingress_rule"]["endport"] )) - self.virtual_machine.get_ssh_client() + self.virtual_machine.get_ssh_client(retries=5) except Exception as e: self.fail("SSH access failed for ingress rule ID: %s, %s" @@ -1623,7 +1647,7 @@ class TestIngressRule(cloudstackTestCase): self.debug( "Trying to SSH into VM %s" % self.virtual_machine.ssh_ip) - self.virtual_machine.get_ssh_client() + self.virtual_machine.get_ssh_client(retries=5) except Exception as e: self.fail("SSH access failed for ingress rule ID: %s" % ingress_rule["id"] @@ -1642,7 +1666,7 @@ class TestIngressRule(cloudstackTestCase): self.debug( "Trying to SSH into VM %s" % self.virtual_machine.ssh_ip) - self.virtual_machine.get_ssh_client() + self.virtual_machine.get_ssh_client(retries=5) except Exception as e: self.fail("SSH access failed for ingress rule ID: %s" % ingress_rule["id"] diff --git a/test/integration/component/test_tags.py b/test/integration/component/test_tags.py index 2141384eaf7..6ee5d3c6ea9 100644 --- a/test/integration/component/test_tags.py +++ b/test/integration/component/test_tags.py @@ -2426,7 +2426,7 @@ class TestResourceTags(cloudstackTestCase): hosts.pop(0) host_ids = [host.id for host in hosts] for id in host_ids: - if not id in host_ids_for_migration: + if id not in host_ids_for_migration: self.fail("Not all hosts are available for vm migration") return diff --git a/test/integration/plugins/ldap/test_ldap.py b/test/integration/plugins/ldap/test_ldap.py index fd0aecfab45..6746f3289f7 100644 --- a/test/integration/plugins/ldap/test_ldap.py +++ b/test/integration/plugins/ldap/test_ldap.py @@ -101,11 +101,13 @@ class TestLDAP(cloudstackTestCase): def tearDownClass(cls): cls.logger.info("Tearing Down Class") try: - cleanup_resources(cls.apiclient, 
reversed(cls._cleanup)) - cls.remove_ldap_configuration_for_domains() - cls.logger.debug("done cleaning up resources in tearDownClass(cls) %s") - except Exception as e: - cls.logger.debug("Exception in tearDownClass(cls): %s" % e) + super(TestLDAP, cls).tearDownClass() + finally: + try: + cls.remove_ldap_configuration_for_domains() + cls.logger.debug("done cleaning up resources in tearDownClass(cls) %s") + except Exception as e: + cls.logger.debug("Exception in tearDownClass(cls): %s" % e) def setUp(self): self.cleanup = [] @@ -116,11 +118,7 @@ class TestLDAP(cloudstackTestCase): return def tearDown(self): - try: - cleanup_resources(self.apiclient, self.cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return + super(TestLDAP, self).tearDown() @attr(tags=["smoke", "advanced"], required_hardware="false") def test_01_manual(self): @@ -349,8 +347,8 @@ class TestLDAP(cloudstackTestCase): if parent_domain: domain_to_create["parentdomainid"] = parent_domain tmpDomain = Domain.create(cls.apiclient, domain_to_create) - cls.logger.debug("Created domain %s with id %s " % (tmpDomain.name, tmpDomain.id)) cls._cleanup.append(tmpDomain) + cls.logger.debug("Created domain %s with id %s " % (tmpDomain.name, tmpDomain.id)) return tmpDomain @classmethod diff --git a/test/integration/plugins/quota/test_quota_balance.py b/test/integration/plugins/quota/test_quota_balance.py new file mode 100644 index 00000000000..f5c1c75d7b2 --- /dev/null +++ b/test/integration/plugins/quota/test_quota_balance.py @@ -0,0 +1,191 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Test cases for validating the Quota balance of accounts +""" + +from marvin.cloudstackTestCase import * +from marvin.lib.utils import * +from marvin.lib.base import * +from marvin.lib.common import * +from nose.plugins.attrib import attr + + +class TestQuotaBalance(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestQuotaBalance, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.zone + + # Create Account + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup = [ + cls.account, + ] + + cls.services["account"] = cls.account.name + + if not is_config_suitable(apiclient=cls.apiclient, name='quota.enable.service', value='true'): + cls.debug("Quota service is not enabled, therefore the configuration `quota.enable.service` will be set to `true` and the management server will be restarted.") + Configurations.update(cls.apiclient, "quota.enable.service", "true") + cls.restartServer() + + return + + @classmethod + def restartServer(cls): + """Restart management server""" + + cls.debug("Restarting management server") + sshClient = SshClient( + cls.mgtSvrDetails["mgtSvrIp"], + 22, + cls.mgtSvrDetails["user"], + cls.mgtSvrDetails["passwd"] + ) + + command = "service 
cloudstack-management restart" + sshClient.execute(command) + + # Waits for management to come up in 5 mins, when it's up it will continue + timeout = time.time() + 300 + while time.time() < timeout: + if cls.isManagementUp() is True: + time.sleep(30) + return + time.sleep(5) + return cls.fail("Management server did not come up, failing") + + @classmethod + def isManagementUp(cls): + try: + cls.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd()) + return True + except Exception: + return False + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + self.tariffs = [] + return + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + self.delete_tariffs() + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def delete_tariffs(self): + for tariff in self.tariffs: + cmd = quotaTariffDelete.quotaTariffDeleteCmd() + cmd.id = tariff.uuid + self.apiclient.quotaTariffDelete(cmd) + + @attr(tags=["advanced", "smoke", "quota"], required_hardware="false") + def test_quota_balance(self): + """ + Test Quota balance + + Validate the following + 1. Add credits to an account + 2. Create Quota tariff for the usage type 21 (VM_DISK_IO_READ) + 3. Simulate quota usage by inserting a row in the `cloud_usage` table + 4. Update the balance of the account by calling the API quotaUpdate + 5. 
Verify the balance of the account according to the tariff created + """ + + # Create quota tariff for the usage type 21 (VM_DISK_IO_READ) + cmd = quotaTariffCreate.quotaTariffCreateCmd() + cmd.name = 'Tariff' + cmd.value = '10' + cmd.usagetype = '21' + self.tariffs.append(self.apiclient.quotaTariffCreate(cmd)) + + # Add credits to the account + cmd = quotaCredits.quotaCreditsCmd() + cmd.account = self.account.name + cmd.domainid = self.domain.id + cmd.value = 100 + self.apiclient.quotaCredits(cmd) + + # Fetch account ID from account_uuid + account_id_select = f"SELECT id FROM account WHERE uuid = '{self.account.id}';" + self.debug(account_id_select) + qresultset = self.dbclient.execute(account_id_select) + account_id = qresultset[0][0] + + # Fetch domain ID from domain_uuid + domain_id_select = f"SELECT id FROM `domain` d WHERE uuid = '{self.domain.id}';" + self.debug(domain_id_select) + qresultset = self.dbclient.execute(domain_id_select) + domain_id = qresultset[0][0] + + # Fetch zone ID from zone_uuid + zone_id_select = f"SELECT id from data_center dc where dc.uuid = '{self.zone.id}';" + self.debug(zone_id_select) + qresultset = self.dbclient.execute(zone_id_select) + zone_id = qresultset[0][0] + + start_date = datetime.datetime.now() + datetime.timedelta(seconds=1) + end_date = datetime.datetime.now() + datetime.timedelta(hours=1) + + # Manually insert a usage regarding the usage type 21 (VM_DISK_IO_READ) + sql_query = (f"INSERT INTO cloud_usage.cloud_usage (zone_id,account_id,domain_id,description,usage_display,usage_type,raw_usage,vm_instance_id,vm_name,offering_id,template_id," + f"usage_id,`type`,`size`,network_id,start_date,end_date,virtual_size,cpu_speed,cpu_cores,memory,quota_calculated,is_hidden,state)" + f" VALUES ('{zone_id}','{account_id}','{domain_id}','Test','1 Hrs',21,1,NULL,NULL,NULL,NULL,NULL,'VirtualMachine',NULL,NULL,'{start_date}','{end_date}',NULL,NULL,NULL,NULL,0,0,NULL);") + self.debug(sql_query) + self.dbclient.execute(sql_query) + + # 
Update quota to calculate the balance of the account + cmd = quotaUpdate.quotaUpdateCmd() + self.apiclient.quotaUpdate(cmd) + + # Retrieve the quota balance of the account + cmd = quotaBalance.quotaBalanceCmd() + cmd.domainid = self.account.domainid + cmd.account = self.account.name + response = self.apiclient.quotaBalance(cmd) + + self.debug(f"The quota balance for the account {self.account.name} is {response.balance}.") + self.assertEqual(response.balance.startquota, 90, f"The `startQuota` response field is supposed to be 90 but was {response.balance.startquota}.") + + return diff --git a/test/integration/plugins/storpool/TestStorPoolVolumes.py b/test/integration/plugins/storpool/TestStorPoolVolumes.py index 640a2f9d2bc..70f8900df58 100644 --- a/test/integration/plugins/storpool/TestStorPoolVolumes.py +++ b/test/integration/plugins/storpool/TestStorPoolVolumes.py @@ -2073,7 +2073,7 @@ class TestStoragePool(cloudstackTestCase): clusterid = c.id ) for conf in configuration: - if conf.name == 'sp.cluster.id' and (conf.value in clusterid[1]): + if conf.name == 'sp.cluster.id' and (conf.value in clusterid[1]): return c @classmethod @@ -2088,7 +2088,7 @@ class TestStoragePool(cloudstackTestCase): clusterid = c.id ) for conf in configuration: - if conf.name == 'sp.cluster.id' and (conf.value not in clusterid[1]): + if conf.name == 'sp.cluster.id' and (conf.value not in clusterid[1]): return c @classmethod diff --git a/test/integration/plugins/storpool/sp_util.py b/test/integration/plugins/storpool/sp_util.py index 569aa4a2539..70f36609af5 100644 --- a/test/integration/plugins/storpool/sp_util.py +++ b/test/integration/plugins/storpool/sp_util.py @@ -79,6 +79,11 @@ class TestData(): diskOfferingEncrypted2 = "diskOfferingEncrypted2" cephDiskOffering = "cephDiskOffering" nfsDiskOffering = "nfsDiskOffering" + diskOfferingTier1Tag = "diskOfferingTier1Tag" + diskOfferingTier2Tag = "diskOfferingTier2Tag" + diskOfferingTier1Template = "diskOfferingTier1Template" + 
diskOfferingTier2Template = "diskOfferingTier2Template" + diskOfferingWithTagsAndTempl = "diskOfferingWithTagsAndTempl" domainId = "domainId" hypervisor = "hypervisor" login = "login" @@ -278,6 +283,46 @@ class TestData(): TestData.tags: "nfs", "storagetype": "shared" }, + TestData.diskOfferingTier1Template: { + "name": "tier1-template", + "displaytext": "Tier1 using different StorPool template", + "custom": True, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, + TestData.diskOfferingTier2Template: { + "name": "tier2-template", + "displaytext": "Tier2 using different StorPool template", + "custom": True, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, + TestData.diskOfferingTier1Tag: { + "name": "tier1-tag", + "displaytext": "Tier1 using QOS tags", + "custom": True, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, + TestData.diskOfferingTier2Tag: { + "name": "tier2-tag", + "displaytext": "Tier2 using QOS tags", + "custom": True, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, + TestData.diskOfferingWithTagsAndTempl: { + "name": "tier2-tag-template", + "displaytext": "Tier2 using QOS tags and template", + "custom": True, + "hypervisorsnapshotreserve": 200, + TestData.tags: sp_template_1, + "storagetype": "shared" + }, TestData.volume_1: { TestData.diskName: "test-volume-1", }, @@ -384,7 +429,7 @@ class StorPoolHelper(): clusterid = c.id ) for conf in configuration: - if conf.name == 'sp.cluster.id' and (conf.value in clusterid[1]): + if conf.name == 'sp.cluster.id' and (conf.value in clusterid[1]): return c @classmethod @@ -399,7 +444,7 @@ class StorPoolHelper(): clusterid = c.id ) for conf in configuration: - if conf.name == 'sp.cluster.id' and (conf.value not in clusterid[1]): + if conf.name == 'sp.cluster.id' and (conf.value not in clusterid[1]): return c 
@classmethod diff --git a/test/integration/plugins/storpool/test_storpool_tiers.py b/test/integration/plugins/storpool/test_storpool_tiers.py new file mode 100644 index 00000000000..71758c24bed --- /dev/null +++ b/test/integration/plugins/storpool/test_storpool_tiers.py @@ -0,0 +1,544 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import pprint +import uuid + +from marvin.cloudstackAPI import (listResourceDetails, addResourceDetail, changeOfferingForVolume) +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.codes import FAILED +from marvin.lib.base import (DiskOffering, + ServiceOffering, + StoragePool, + VirtualMachine, + SecurityGroup, + ResourceDetails + ) +from marvin.lib.common import (get_domain, + get_template, + list_disk_offering, + list_storage_pools, + list_volumes, + list_service_offering, + list_zones) +from marvin.lib.utils import random_gen, cleanup_resources +from nose.plugins.attrib import attr +from storpool import spapi + +from sp_util import (TestData, StorPoolHelper) + + +class TestStorPoolTiers(cloudstackTestCase): + @classmethod + def setUpClass(cls): + super(TestStorPoolTiers, cls).setUpClass() + try: + cls.setUpCloudStack() + except Exception: + raise + + @classmethod + def setUpCloudStack(cls): + config = cls.getClsConfig() + StorPoolHelper.logger = cls + + zone = config.zones[0] + assert zone is not None + + cls.spapi = spapi.Api(host=zone.spEndpoint, port=zone.spEndpointPort, auth=zone.spAuthToken, multiCluster=True) + testClient = super(TestStorPoolTiers, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.unsupportedHypervisor = False + cls.hypervisor = testClient.getHypervisorInfo() + if cls.hypervisor.lower() in ("hyperv", "lxc"): + cls.unsupportedHypervisor = True + return + + cls._cleanup = [] + + cls.services = testClient.getParsedTestDataConfig() + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = list_zones(cls.apiclient, name=zone.name)[0] + + td = TestData() + cls.testdata = td.testdata + cls.helper = StorPoolHelper() + + disk_offerings_tier1_tags = cls.testdata[TestData.diskOfferingTier1Tag] + disk_offerings_tier2_tags = cls.testdata[TestData.diskOfferingTier2Tag] + disk_offerings_tier1_template = cls.testdata[TestData.diskOfferingTier1Template] + 
disk_offerings_tier2_template = cls.testdata[TestData.diskOfferingTier2Template] + disk_offerings_tier2_tags_template = cls.testdata[TestData.diskOfferingWithTagsAndTempl] + + cls.qos = "SP_QOSCLASS" + cls.spTemplate = "SP_TEMPLATE" + + cls.disk_offerings_tier1_tags = cls.getDiskOffering(disk_offerings_tier1_tags, cls.qos, "ssd") + + cls.disk_offerings_tier2_tags = cls.getDiskOffering(disk_offerings_tier2_tags, cls.qos, "virtual") + + cls.disk_offerings_tier1_template = cls.getDiskOffering(disk_offerings_tier1_template, cls.spTemplate, "ssd") + + cls.disk_offerings_tier2_template = cls.getDiskOffering(disk_offerings_tier2_template, cls.spTemplate, + "virtual") + cls.disk_offerings_tier2_tags_template = cls.getDiskOffering(disk_offerings_tier2_tags_template, cls.spTemplate, + "virtual") + cls.resourceDetails(cls.qos, cls.disk_offerings_tier2_tags_template.id, "virtual") + + cls.account = cls.helper.create_account( + cls.apiclient, + cls.services["account"], + accounttype=1, + domainid=cls.domain.id, + roleid=1 + ) + cls._cleanup.append(cls.account) + + securitygroup = SecurityGroup.list(cls.apiclient, account=cls.account.name, domainid=cls.account.domainid)[0] + cls.helper.set_securityGroups(cls.apiclient, account=cls.account.name, domainid=cls.account.domainid, + id=securitygroup.id) + + storpool_primary_storage = cls.testdata[TestData.primaryStorage] + + storpool_service_offerings = cls.testdata[TestData.serviceOffering] + + cls.template_name = storpool_primary_storage.get("name") + + storage_pool = list_storage_pools( + cls.apiclient, + name=cls.template_name + ) + + service_offerings = list_service_offering( + cls.apiclient, + name=cls.template_name + ) + + disk_offerings = list_disk_offering( + cls.apiclient, + name="ssd" + ) + + if storage_pool is None: + storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage) + else: + storage_pool = storage_pool[0] + cls.storage_pool = storage_pool + cls.debug(pprint.pformat(storage_pool)) + if 
service_offerings is None: + service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings) + else: + service_offerings = service_offerings[0] + # The version of CentOS has to be supported + template = get_template( + cls.apiclient, + cls.zone.id, + account="system" + ) + + if template == FAILED: + assert False, "get_template() failed to return template\ + with description %s" % cls.services["ostype"] + + cls.services["domainid"] = cls.domain.id + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["templates"]["ostypeid"] = template.ostypeid + cls.services["zoneid"] = cls.zone.id + + cls.service_offering = service_offerings + cls.debug(pprint.pformat(cls.service_offering)) + + cls.template = template + cls.random_data_0 = random_gen(size=100) + cls.test_dir = "/tmp" + cls.random_data = "random.data" + return + + @classmethod + def getDiskOffering(cls, dataDiskOffering, qos, resValue): + disk_offerings = list_disk_offering(cls.apiclient, name=dataDiskOffering.get("name")) + if disk_offerings is None: + disk_offerings = DiskOffering.create(cls.apiclient, services=dataDiskOffering, custom=True) + cls.resourceDetails(qos, disk_offerings.id, resValue) + else: + disk_offerings = disk_offerings[0] + cls.resourceDetails(qos, disk_offerings.id, ) + return disk_offerings + + @classmethod + def tearDownClass(cls): + super(TestStorPoolTiers, cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + + if self.unsupportedHypervisor: + self.skipTest("Skipping test because unsupported hypervisor\ + %s" % self.hypervisor) + return + + def tearDown(self): + super(TestStorPoolTiers, self).tearDown() + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_01_check_tags_on_deployed_vm_and_datadisk(self): + virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag() + virtual_machine_tier1_tag.stop(self.apiclient, forced=True) + + 
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_02_change_offering_on_attached_root_disk(self): + virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag() + + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT", + listall=True) + self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, root_volume[0].size) + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT", + listall=True) + self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True) + virtual_machine_tier1_tag.stop(self.apiclient, forced=True) + + def test_03_change_offering_on_attached_data_disk(self): + virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag() + + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK", + listall=True) + self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, root_volume[0].size) + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK", + listall=True) + self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True) + virtual_machine_tier1_tag.stop(self.apiclient, forced=True) + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_04_check_templates_on_deployed_vm_and_datadisk(self): + virtual_machine_template_tier1 = VirtualMachine.create( + self.apiclient, + {"name": "StorPool-%s" % uuid.uuid4()}, + zoneid=self.zone.id, + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + overridediskofferingid=self.disk_offerings_tier1_template.id, + 
diskofferingid=self.disk_offerings_tier1_template.id, + size=2, + hypervisor=self.hypervisor, + rootdisksize=10 + ) + volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_template_tier1.id, listall=True) + for v in volumes: + self.check_storpool_template(v, self.disk_offerings_tier1_template.id, self.spTemplate) + virtual_machine_template_tier1.stop(self.apiclient, forced=True) + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_05_check_templates_on_deployed_vm_and_datadisk_tier2(self): + virtual_machine_template_tier2 = VirtualMachine.create( + self.apiclient, + {"name": "StorPool-%s" % uuid.uuid4()}, + zoneid=self.zone.id, + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + overridediskofferingid=self.disk_offerings_tier2_template.id, + diskofferingid=self.disk_offerings_tier2_template.id, + size=2, + hypervisor=self.hypervisor, + rootdisksize=10 + ) + volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_template_tier2.id, listall=True) + for v in volumes: + self.check_storpool_template(v, self.disk_offerings_tier2_template.id, self.spTemplate) + virtual_machine_template_tier2.stop(self.apiclient, forced=True) + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_06_change_offerings_with_tags_detached_volume(self): + disk_off_id = self.disk_offerings_tier2_tags.id + virtual_machine_tier2_tag = VirtualMachine.create( + self.apiclient, + {"name": "StorPool-%s" % uuid.uuid4()}, + zoneid=self.zone.id, + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + overridediskofferingid=disk_off_id, + diskofferingid=disk_off_id, + size=2, + hypervisor=self.hypervisor, + rootdisksize=10 + ) + virtual_machine_tier2_tag.stop(self.apiclient, forced=True) + volumes = 
list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier2_tag.id, type="DATADISK", + listall=True) + + virtual_machine_tier2_tag.detach_volume( + self.apiclient, + volumes[0] + ) + + self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier2_tag, qos_or_template=self.qos, + disk_offering_id=disk_off_id, attached=True) + + self.changeOfferingForVolume(volumes[0].id, self.disk_offerings_tier1_tags.id, volumes[0].size) + self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier2_tag, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier1_tags.id, attached=True) + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_07_change_offerings_with_template_detached_volume(self): + disk_off_id = self.disk_offerings_tier2_template.id + virtual_machine_tier2_template = VirtualMachine.create( + self.apiclient, + {"name": "StorPool-%s" % uuid.uuid4()}, + zoneid=self.zone.id, + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + overridediskofferingid=disk_off_id, + diskofferingid=disk_off_id, + size=2, + hypervisor=self.hypervisor, + rootdisksize=10 + ) + virtual_machine_tier2_template.stop(self.apiclient, forced=True) + volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier2_template.id, type="DATADISK", + listall=True) + + virtual_machine_tier2_template.detach_volume( + self.apiclient, + volumes[0] + ) + + self.check_storpool_template(volume=volumes[0], disk_offering_id=disk_off_id, qos_or_template=self.spTemplate) + + self.changeOfferingForVolume(volumes[0].id, self.disk_offerings_tier1_template.id, volumes[0].size) + self.check_storpool_template(volume=volumes[0], disk_offering_id=self.disk_offerings_tier1_template.id, + qos_or_template=self.spTemplate) + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_08_deploy_vm_with_tags_and_template_in_offerings(self): + """ 
+ Deploy virtual machine with disk offering on which resource details is set tier2 template and tier2 qos tags + """ + disk_off_id = self.disk_offerings_tier2_tags_template.id + virtual_machine_tier2_template = VirtualMachine.create( + self.apiclient, + {"name": "StorPool-%s" % uuid.uuid4()}, + zoneid=self.zone.id, + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + overridediskofferingid=disk_off_id, + diskofferingid=disk_off_id, + size=2, + hypervisor=self.hypervisor, + rootdisksize=10 + ) + virtual_machine_tier2_template.stop(self.apiclient, forced=True) + volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier2_template.id, type="DATADISK", + listall=True) + + virtual_machine_tier2_template.detach_volume( + self.apiclient, + volumes[0] + ) + + self.check_storpool_template(volume=volumes[0], disk_offering_id=disk_off_id, qos_or_template=self.spTemplate, + diff_template=True) + self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier2_template, qos_or_template=self.qos, + disk_offering_id=disk_off_id, attached=True) + + self.changeOfferingForVolume(volumes[0].id, self.disk_offerings_tier1_tags.id, volumes[0].size) + self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier2_template, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier1_tags.id, attached=True) + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_09_resize_root_volume(self): + ''' + Resize Root volume with changeOfferingForVolume + ''' + virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag() + + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT", + listall=True) + self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, (root_volume[0].size + 1024)) + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, 
type="ROOT", + listall=True) + self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True) + virtual_machine_tier1_tag.stop(self.apiclient, forced=True) + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_10_shrink_root_volume(self): + ''' + Shrink Root volume with changeOfferingForVolume + ''' + virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag() + + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT", + listall=True) + virtual_machine_tier1_tag.stop(self.apiclient, forced=True) + self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, (root_volume[0].size - 1024), + True) + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="ROOT", + listall=True) + self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True) + + @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") + def test_11_resize_data_volume(self): + ''' + Resize DATADISK volume with changeOfferingForVolume + ''' + virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag() + + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK", + listall=True) + self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, (root_volume[0].size + 1024)) + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK", + listall=True) + self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True) + virtual_machine_tier1_tag.stop(self.apiclient, forced=True) + + @attr(tags=["advanced", "advancedns", 
"smoke"], required_hardware="true") + def test_12_shrink_data_volume(self): + ''' + Shrink DATADISK volume with changeOfferingForVolume + ''' + virtual_machine_tier1_tag = self.deploy_vm_and_check_tier_tag() + + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK", + listall=True) + self.changeOfferingForVolume(root_volume[0].id, self.disk_offerings_tier2_tags.id, (root_volume[0].size - 1024), + True) + root_volume = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, type="DATADISK", + listall=True) + self.vc_policy_tags(volumes=root_volume, vm=virtual_machine_tier1_tag, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier2_tags.id, attached=True) + virtual_machine_tier1_tag.stop(self.apiclient, forced=True) + + def deploy_vm_and_check_tier_tag(self): + virtual_machine_tier1_tag = VirtualMachine.create( + self.apiclient, + {"name": "StorPool-%s" % uuid.uuid4()}, + zoneid=self.zone.id, + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + overridediskofferingid=self.disk_offerings_tier1_tags.id, + diskofferingid=self.disk_offerings_tier1_tags.id, + size=2, + hypervisor=self.hypervisor, + rootdisksize=10 + ) + volumes = list_volumes(self.apiclient, virtualmachineid=virtual_machine_tier1_tag.id, listall=True) + self.vc_policy_tags(volumes=volumes, vm=virtual_machine_tier1_tag, qos_or_template=self.qos, + disk_offering_id=self.disk_offerings_tier1_tags.id, attached=True) + return virtual_machine_tier1_tag + + @classmethod + def resourceDetails(cls, qos, id, resValue=None): + listResourceDetailCmd = listResourceDetails.listResourceDetailsCmd() + listResourceDetailCmd.resourceid = id + listResourceDetailCmd.resourcetype = "DiskOffering" + listResourceDetailCmd.key = qos + details = cls.apiclient.listResourceDetails(listResourceDetailCmd) + + if details is None: + resource = 
addResourceDetail.addResourceDetailCmd() + resource.resourceid = id + resource.resourcetype = "DiskOffering" + resDet = {'key': qos, 'value': resValue} + resource.details = [resDet] + + resource.fordisplay = True + details = cls.apiclient.addResourceDetail(resource) + + @classmethod + def getZone(cls): + zones = list_zones(cls.apiclient) + for z in zones: + if z.name == cls.getClsConfig().mgtSvr[0].zone: + cls.zone = z + assert cls.zone is not None + + def vc_policy_tags(self, volumes, vm, qos_or_template, disk_offering_id, should_tags_exists=None, vm_tags=None, + attached=None): + vc_policy_tag = False + cvm_tag = False + qs_tag = False + id = vm.id + for v in volumes: + name = v.path.split("/")[3] + volume = self.spapi.volumeList(volumeName="~" + name) + tags = volume[0].tags + resource_details_value = ResourceDetails.list(self.apiclient, resourcetype="DiskOffering", + resourceid=disk_offering_id, key=qos_or_template) + for t in tags: + self.debug("TAGS are %s" % t) + if vm_tags: + for vm_tag in vm_tags: + if t == vm_tag.key: + vc_policy_tag = True + self.assertEqual(tags[t], vm_tag.value, "Tags are not equal") + if t == 'cvm': + self.debug("CVM tag %s is not the same as vm UUID %s" % (tags[t], id)) + self.debug(type(tags[t])) + self.debug(len(tags[t])) + self.debug(type(id)) + self.debug(len(id)) + cvm_tag = True + self.assertEqual(tags[t], id, "CVM tag is not the same as vm UUID ") + if t == 'qc': + qs_tag = True + self.assertEqual(tags[t], resource_details_value[0].value, "QOS tags should be the same") + if should_tags_exists: + self.assertTrue(vc_policy_tag, "There aren't volumes with vm tags") + self.assertTrue(cvm_tag, "There aren't volumes with vm tags") + if attached: + self.assertTrue(qs_tag, "The QOS tag isn't set") + else: + self.assertFalse(vc_policy_tag, "The tags should be removed") + self.assertFalse(cvm_tag, "The tags should be removed") + + def check_storpool_template(self, volume, disk_offering_id, qos_or_template, diff_template=None): + name = 
volume.path.split("/")[3] + sp_volume = self.spapi.volumeList(volumeName="~" + name) + template = sp_volume[0].templateName + resource_details_value = ResourceDetails.list(self.apiclient, resourcetype="DiskOffering", + resourceid=disk_offering_id, key=qos_or_template) + if diff_template: + self.assertNotEqual(template, resource_details_value[0].value, "The templates should not be the same") + else: + self.assertEqual(template, resource_details_value[0].value) + + def changeOfferingForVolume(self, volume_id, disk_offering_id, size, shrinkok=None): + size = int(size / 1024 / 1024 / 1024) + change_offering_for_volume_cmd = changeOfferingForVolume.changeOfferingForVolumeCmd() + change_offering_for_volume_cmd.id = volume_id + change_offering_for_volume_cmd.diskofferingid = disk_offering_id + change_offering_for_volume_cmd.size = size + change_offering_for_volume_cmd.shrinkok = shrinkok + + return self.apiclient.changeOfferingForVolume(change_offering_for_volume_cmd) diff --git a/test/integration/plugins/test_quota_tariff_order.py b/test/integration/plugins/test_quota_tariff_order.py new file mode 100644 index 00000000000..3236c6c3c18 --- /dev/null +++ b/test/integration/plugins/test_quota_tariff_order.py @@ -0,0 +1,175 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +""" Test cases for checking quota API +""" + +# Import Local Modules +import tools.marvin.marvin +from tools.marvin.marvin.cloudstackTestCase import * +from tools.marvin.marvin.cloudstackAPI import * +from tools.marvin.marvin.lib.utils import * +from tools.marvin.marvin.lib.base import * +from tools.marvin.marvin.lib.common import * +from nose.plugins.attrib import attr + +# Import System modules +import time + + +class TestQuotaTariffOrder(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestQuotaTariffOrder, cls).getClsTestClient() + cls.api_client = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client) + cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests()) + + cls._cleanup = [] + # Create Account + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + + cls.services["account"] = cls.account.name + + return + + @classmethod + def tearDownClass(cls): + super(TestQuotaTariffOrder, cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + self.tariffs = [] + return + + def tearDown(self): + self.delete_tariffs() + super(TestQuotaTariffOrder, self).tearDown() + + def delete_tariffs(self): + for tariff in self.tariffs: + cmd = quotaTariffDelete.quotaTariffDeleteCmd() + cmd.id = tariff.uuid + self.api_client.quotaTariffDelete(cmd) + + @attr( + tags=[ + "advanced", + "smoke"], + required_hardware="false") + def test_01_quota_tariff_order(self): + """Test Quota Tariff Order + """ + + cmd = quotaTariffCreate.quotaTariffCreateCmd() + cmd.name = 'tf1' + cmd.value = '1' + cmd.activationrule = '10' + cmd.usagetype = '22' + 
cmd.position = '2' + self.tariffs.append(self.api_client.quotaTariffCreate(cmd)) + + cmd = quotaTariffCreate.quotaTariffCreateCmd() + cmd.name = 'tf2' + cmd.value = '1' + cmd.activationrule = 'lastTariffs[lastTariffs.length -1].value + 7' + cmd.usagetype = '22' + cmd.position = '3' + self.tariffs.append(self.api_client.quotaTariffCreate(cmd)) + + cmd = quotaTariffCreate.quotaTariffCreateCmd() + cmd.name = 'tf3' + cmd.value = '1' + cmd.activationrule = 'lastTariffs[lastTariffs.length -2].value + lastTariffs[lastTariffs.length -1].value' + cmd.usagetype = '22' + cmd.position = '4' + self.tariffs.append(self.api_client.quotaTariffCreate(cmd)) + + cmd = quotaCredits.quotaCreditsCmd() + cmd.account = self.account.name + cmd.domainid = self.domain.id + cmd.value = 54 + self.api_client.quotaCredits(cmd) + + # Fetch account ID from account_uuid + self.debug("select id from account where uuid = '%s';" + % self.account.id) + + qresultset = self.dbclient.execute( + "select id from account where uuid = '%s';" + % self.account.id + ) + + account_id = qresultset[0][0] + + self.debug("SELECT id from `domain` d WHERE uuid = '%s';" + % self.domain.id) + + qresultset = self.dbclient.execute( + "SELECT id from `domain` d WHERE uuid = '%s';" + % self.domain.id + ) + + domain_id = qresultset[0][0] + + self.debug("SELECT id from data_center dc where dc.uuid = '%s';" + % self.zone.id) + + qresultset = self.dbclient.execute( + "SELECT id from data_center dc where dc.uuid = '%s';" + % self.zone.id + ) + + zone_id = qresultset[0][0] + + start = datetime.datetime.now() + datetime.timedelta(seconds=1) + end = datetime.datetime.now() + datetime.timedelta(hours=1) + + query = "INSERT INTO cloud_usage.cloud_usage (zone_id,account_id,domain_id,description,usage_display," + "usage_type,raw_usage,vm_instance_id,vm_name,offering_id,template_id,usage_id,`type`,`size`," + "network_id,start_date,end_date,virtual_size,cpu_speed,cpu_cores,memory,quota_calculated," + "is_hidden,state) VALUES 
('{}','{}','{}','Test','1 Hrs',22,1,NULL,NULL,NULL,NULL,NULL," + "'VirtualMachine',NULL,NULL,'{}','{}',NULL,NULL,NULL,NULL,0,0,NULL);".format(zone_id, account_id, domain_id, start, end) + + self.debug(query) + + self.dbclient.execute( + query) + + cmd = quotaUpdate.quotaUpdateCmd() + self.api_client.quotaUpdate(cmd) + + cmd = quotaBalance.quotaBalanceCmd() + cmd.domainid = self.account.domainid + cmd.account = self.account.name + response = self.apiclient.quotaBalance(cmd) + + self.debug(f"Quota Balance: {response.balance}") + + self.assertEqual(response.balance.startquota, 0, f"startQuota is supposed to be 0 but was {response.balance.startquota}") + + return diff --git a/test/integration/smoke/test_account_access.py b/test/integration/smoke/test_account_access.py new file mode 100644 index 00000000000..97eeced6386 --- /dev/null +++ b/test/integration/smoke/test_account_access.py @@ -0,0 +1,198 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" BVT tests for Account User Access +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.lib.utils import * +from marvin.lib.base import (Account, + User, + Domain) +from marvin.lib.common import (get_domain) +from marvin.cloudstackAPI import (getUserKeys) +from marvin.cloudstackException import CloudstackAPIException +from nose.plugins.attrib import attr + +_multiprocess_shared_ = True + +class TestAccountAccess(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestAccountAccess, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.hypervisor = testClient.getHypervisorInfo() + cls._cleanup = [] + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + + cls.domains = [] + cls.domain_admins = {} + cls.domain_users = {} + cls.account_users = {} + + domain_data = { + "name": "domain_1" + } + cls.domain_1 = Domain.create( + cls.apiclient, + domain_data, + ) + cls._cleanup.append(cls.domain_1) + cls.domains.append(cls.domain_1) + domain_data["name"] = "domain_11" + cls.domain_11 = Domain.create( + cls.apiclient, + domain_data, + parentdomainid=cls.domain_1.id + ) + cls._cleanup.append(cls.domain_11) + cls.domains.append(cls.domain_11) + domain_data["name"] = "domain_12" + cls.domain_12 = Domain.create( + cls.apiclient, + domain_data, + parentdomainid=cls.domain_1.id + ) + cls._cleanup.append(cls.domain_12) + cls.domains.append(cls.domain_12) + domain_data["name"] = "domain_2" + cls.domain_2 = Domain.create( + cls.apiclient, + domain_data, + ) + cls._cleanup.append(cls.domain_2) + cls.domains.append(cls.domain_2) + + + for d in cls.domains: + cls.create_domainadmin_and_user(d) + + @classmethod + def tearDownClass(cls): + super(TestAccountAccess, cls).tearDownClass() + + @classmethod + def create_account(cls, domain, is_admin): + cls.debug(f"Creating account for domain {domain.name}, admin: 
{is_admin}") + data = { + "email": "admin-" + domain.name + "@test.com", + "firstname": "Admin", + "lastname": domain.name, + "username": "admin-" + domain.name, + "password": "password" + } + if is_admin == False: + data["email"] = "user-" + domain.name + "@test.com" + data["firstname"] = "User" + data["username"] = "user-" + domain.name + account = Account.create( + cls.apiclient, + data, + admin=is_admin, + domainid=domain.id + ) + cls._cleanup.append(account) + if is_admin == True: + cls.domain_admins[domain.id] = account + else: + cls.domain_users[domain.id] = account + + user = User.create( + cls.apiclient, + data, + account=account.name, + domainid=account.domainid) + cls._cleanup.append(user) + cls.account_users[account.id] = user + + @classmethod + def create_domainadmin_and_user(cls, domain): + cls.debug(f"Creating accounts for domain #{domain.id} {domain.name}") + cls.create_account(domain, True) + cls.create_account(domain, False) + + def get_user_keys(self, api_client, user_id): + getUserKeysCmd = getUserKeys.getUserKeysCmd() + getUserKeysCmd.id = user_id + return api_client.getUserKeys(getUserKeysCmd) + + def is_child_domain(self, parent_domain, child_domain): + if not parent_domain or not child_domain: + return False + parent_domain_prefix = parent_domain.split('-')[0] + child_domain_prefix = child_domain.split('-')[0] + if not parent_domain_prefix or not child_domain_prefix: + return False + return child_domain_prefix.startswith(parent_domain_prefix) + + + @attr(tags=["advanced", "advancedns", "smoke", "sg"], required_hardware="false") + def test_01_user_access(self): + """ + Test user account is not accessing any other account + """ + + domain_user_accounts = [value for value in self.domain_users.values()] + all_account_users = [value for value in self.account_users.values()] + for user_account in domain_user_accounts: + current_account_user = self.account_users[user_account.id] + self.debug(f"Check for account {user_account.name} with user 
{current_account_user.username}") + user_api_client = self.testClient.getUserApiClient( + UserName=user_account.name, + DomainName=user_account.domain + ) + for user in all_account_users: + self.debug(f"Checking access for user {user.username} associated with account {user.account}") + try: + self.get_user_keys(user_api_client, user.id) + self.debug(f"API successful") + if user.id != current_account_user.id: + self.fail(f"User account #{user_account.id} was able to access another account #{user.id}") + except CloudstackAPIException as e: + self.debug(f"Exception occurred: {e}") + if user.id == current_account_user.id: + self.fail(f"User account #{user_account.id} not able to access own account") + + @attr(tags=["advanced", "advancedns", "smoke", "sg"], required_hardware="false") + def test_02_domain_admin_access(self): + """ + Test domain admin account is not accessing any other account from unauthorized domain + """ + + domain_admin_accounts = [value for value in self.domain_admins.values()] + all_account_users = [value for value in self.account_users.values()] + for admin_account in domain_admin_accounts: + current_account_user = self.account_users[admin_account.id] + self.debug(f"Check for domain admin {admin_account.name} with user {current_account_user.username}, {current_account_user.domain}") + admin_api_client = self.testClient.getUserApiClient( + UserName=admin_account.name, + DomainName=admin_account.domain + ) + for user in all_account_users: + self.debug(f"Checking access for user {user.username}, {user.domain} associated with account {user.account}") + try: + self.get_user_keys(admin_api_client, user.id) + self.debug(f"API successful") + if self.is_child_domain(current_account_user.domain, user.domain) == False: + self.fail(f"User account #{admin_account.id} was able to access another account #{user.id}") + except CloudstackAPIException as e: + self.debug(f"Exception occurred: {e}") + if self.is_child_domain(current_account_user.domain, user.domain) == 
True: + self.fail(f"User account #{admin_account.id} not able to access own account") diff --git a/test/integration/smoke/test_cluster_drs.py b/test/integration/smoke/test_cluster_drs.py index 4db6654aa73..f1eb48f92f5 100644 --- a/test/integration/smoke/test_cluster_drs.py +++ b/test/integration/smoke/test_cluster_drs.py @@ -21,8 +21,10 @@ Tests DRS on a cluster import logging import time +from collections.abc import Iterable from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (migrateSystemVm, listRouters, listSystemVms) from marvin.lib.base import (Cluster, Configurations, Host, Network, NetworkOffering, ServiceOffering, VirtualMachine, Zone) from marvin.lib.common import (get_domain, get_zone, get_template) @@ -98,6 +100,43 @@ class TestClusterDRS(cloudstackTestCase): ) cls._cleanup.append(cls.network) + cls.hypervisor = cls.testClient.getHypervisorInfo() + if cls.hypervisor.lower() not in ['simulator']: + cls.migrateSvms(cls.cluster) + + @classmethod + def migrateSvms(cls, cluster): + """ + for testing the balanced algorithm we must make sure there is at least as more free memory on host[1] than on + host[0]. 
As a crude measure, we migrate any and all system VMs to host[0] before the testing commences.
+
+        :param cluster: the cluster to check
Deploy vm-1 on host 1 - # 2. Deploy vm-2 on host 2 - # 3. Execute DRS to move all VMs on different hosts + """ + Verify DRS algorithm - balanced + # 1. Deploy vm-1 on host 1 + # 2. Deploy vm-2 on host 2 + # 3. Execute DRS to move all VMs on different hosts + """ self.logger.debug("=== Running test_02_balanced_drs_algorithm ===") + # 1. Deploy vm-1 on host 1 self.services["virtual_machine"]["name"] = "virtual-machine-1" self.services["virtual_machine"]["displayname"] = "virtual-machine-1" @@ -240,8 +280,8 @@ class TestClusterDRS(cloudstackTestCase): serviceofferingid=self.service_offering.id, templateid=self.template.id, zoneid=self.zone.id, networkids=self.network.id, hostid=self.hosts[0].id) - vm_2_host_id = self.get_vm_host_id(self.virtual_machine_2.id) self.cleanup.append(self.virtual_machine_2) + vm_2_host_id = self.get_vm_host_id(self.virtual_machine_2.id) self.assertEqual(vm_1_host_id, vm_2_host_id, msg="Both VMs should be on same hosts") self.wait_for_vm_start(self.virtual_machine_1) @@ -256,7 +296,8 @@ class TestClusterDRS(cloudstackTestCase): migration["virtualmachineid"]: migration["destinationhostid"] for migration in migrations } - self.assertEqual(len(vm_to_dest_host_map), 1, msg="DRS plan should have 1 migrations") + # this is one if no svm is considered to be migrated, it might be higher + self.assertTrue(len(vm_to_dest_host_map) >= 1, msg="DRS plan should have at least 1 migrations") executed_plan = self.cluster.executeDrsPlan(self.apiclient, vm_to_dest_host_map) self.wait_for_plan_completion(executed_plan) @@ -264,4 +305,6 @@ class TestClusterDRS(cloudstackTestCase): vm_1_host_id = self.get_vm_host_id(self.virtual_machine_1.id) vm_2_host_id = self.get_vm_host_id(self.virtual_machine_2.id) - self.assertNotEqual(vm_1_host_id, vm_2_host_id, msg="Both VMs should be on different hosts") + self.assertTrue( + vm_1_host_id != self.virtual_machine_1.hostid or vm_2_host_id != self.virtual_machine_2.hostid, + msg="At least one VM should have been migrated to 
a different host") diff --git a/test/integration/smoke/test_ipv4_routing.py b/test/integration/smoke/test_ipv4_routing.py new file mode 100644 index 00000000000..124be678965 --- /dev/null +++ b/test/integration/smoke/test_ipv4_routing.py @@ -0,0 +1,1673 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" Test for IPv4 Routed mode""" +import datetime +import logging +import random +import time + +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.lib.base import ZoneIpv4Subnet, Domain, Account, ServiceOffering, NetworkOffering, VpcOffering, Network, \ + Ipv4SubnetForGuestNetwork, VirtualMachine, VPC, NetworkACLList, NetworkACL, RoutingFirewallRule, Template, ASNRange, \ + BgpPeer, Router +from marvin.lib.common import get_domain, get_zone, list_routers, list_hosts +from marvin.lib.utils import get_host_credentials, get_process_status + +from nose.plugins.attrib import attr + +ICMPv4_ALL_TYPES = ("{ echo-reply, destination-unreachable, source-quench, redirect, echo-request, " + "router-advertisement, router-solicitation, time-exceeded, parameter-problem, timestamp-request, " + "timestamp-reply, info-request, info-reply, address-mask-request, address-mask-reply }") +SUBNET_PREFIX = "172.30." 
+SUBNET_1_PREFIX = SUBNET_PREFIX + str(random.randrange(100, 150)) +SUBNET_2_PREFIX = SUBNET_PREFIX + str(random.randrange(151, 199)) + +VPC_CIDR_PREFIX = "172.31" # .0 to .16 +NETWORK_CIDR_PREFIX = VPC_CIDR_PREFIX + ".100" +NETWORK_CIDR_PREFIX_DYNAMIC = VPC_CIDR_PREFIX + ".101" + +MAX_RETRIES = 30 +WAIT_INTERVAL = 5 + +test_network = None +test_network_vm = None +test_vpc = None +test_vpc_tier = None +test_vpc_vm = None +test_network_acl = None + +START_ASN = 888800 +END_ASN = 888888 +ASN_1 = 900100 + random.randrange(1, 200) +ASN_2 = 900301 + random.randrange(0, 200) +IP4_ADDR_1 = "10.0.53.10" +IP4_ADDR_2 = "10.0.53.11" +PASSWORD_1 = "testpassword1" +PASSWORD_2 = "testpassword2" + +NETWORK_OFFERING = { + "name": "Test Network offering - Routed mode", + "displaytext": "Test Network offering - Routed mode", + "networkmode": "ROUTED", + "guestiptype": "Isolated", + "supportedservices": + "Dhcp,Dns,UserData,Firewall", + "traffictype": "GUEST", + "availability": "Optional", + "egress_policy": "true", + "serviceProviderList": { + "Dhcp": "VirtualRouter", + "Dns": "VirtualRouter", + "UserData": "VirtualRouter", + "Firewall": "VirtualRouter" + } +} + +VPC_OFFERING = { + "name": "Test VPC offering - Routed mode", + "displaytext": "Test VPC offering - Routed mode", + "networkmode": "ROUTED", + "supportedservices": + "Dhcp,Dns,UserData,NetworkACL" +} + +VPC_NETWORK_OFFERING = { + "name": "Test VPC Network offering - Routed mode", + "displaytext": "Test VPC Network offering - Routed mode", + "networkmode": "ROUTED", + "guestiptype": "Isolated", + "supportedservices": + "Dhcp,Dns,UserData,NetworkACL", + "traffictype": "GUEST", + "availability": "Optional", + "serviceProviderList": { + "Dhcp": "VpcVirtualRouter", + "Dns": "VpcVirtualRouter", + "UserData": "VpcVirtualRouter", + "NetworkACL": "VpcVirtualRouter" + } +} + +NETWORK_OFFERING_DYNAMIC = { + "name": "Test Network offering - Dynamic Routed mode", + "displaytext": "Test Network offering - Dynamic Routed mode", + 
"networkmode": "ROUTED", + "routingmode": "Dynamic", + "guestiptype": "Isolated", + "supportedservices": + "Dhcp,Dns,UserData,Firewall", + "traffictype": "GUEST", + "availability": "Optional", + "egress_policy": "true", + "serviceProviderList": { + "Dhcp": "VirtualRouter", + "Dns": "VirtualRouter", + "UserData": "VirtualRouter", + "Firewall": "VirtualRouter" + } +} + +VPC_OFFERING_DYNAMIC = { + "name": "Test VPC offering - Routed mode", + "displaytext": "Test VPC offering - Routed mode", + "networkmode": "ROUTED", + "routingmode": "Dynamic", + "supportedservices": + "Dhcp,Dns,UserData,NetworkACL" +} + +VPC_NETWORK_OFFERING_DYNAMIC = { + "name": "Test VPC Network offering - Dynamic Routed mode", + "displaytext": "Test VPC Network offering - Dynamic Routed mode", + "networkmode": "ROUTED", + "routingmode": "Dynamic", + "guestiptype": "Isolated", + "supportedservices": + "Dhcp,Dns,UserData,NetworkACL", + "traffictype": "GUEST", + "availability": "Optional", + "serviceProviderList": { + "Dhcp": "VpcVirtualRouter", + "Dns": "VpcVirtualRouter", + "UserData": "VpcVirtualRouter", + "NetworkACL": "VpcVirtualRouter" + } +} +class TestIpv4Routing(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testdata = super(TestIpv4Routing, cls).getClsTestClient() + cls.services = testdata.getParsedTestDataConfig() + cls.apiclient = testdata.getApiClient() + cls.dbclient = testdata.getDbConnection() + cls.hypervisor = testdata.getHypervisorInfo() + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient) + + cls._cleanup = [] + + cls.logger = logging.getLogger("TestIpv4Routing") + cls.stream_handler = logging.StreamHandler() + cls.logger.setLevel(logging.DEBUG) + cls.logger.addHandler(cls.stream_handler) + + # 0. 
register template + cls.template = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()], + zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower()) + cls.template.download(cls.apiclient) + cls._cleanup.append(cls.template) + + # 1.1 create subnet for zone + cls.subnet_1 = ZoneIpv4Subnet.create( + cls.apiclient, + zoneid=cls.zone.id, + subnet=SUBNET_1_PREFIX + ".0/24" + ) + cls._cleanup.append(cls.subnet_1) + + # 1.2 create ASN range for zone + cls.asnrange = ASNRange.create( + cls.apiclient, + zoneid=cls.zone.id, + startasn=START_ASN, + endasn=END_ASN + ) + cls._cleanup.append(cls.asnrange) + + # 2. Create small service offering + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["small"] + ) + cls._cleanup.append(cls.service_offering) + + # 3. Create network and vpc offering with routed mode + # 3.1 Network offering for static routing + cls.network_offering_isolated = NetworkOffering.create( + cls.apiclient, + NETWORK_OFFERING + ) + cls._cleanup.append(cls.network_offering_isolated) + cls.network_offering_isolated.update(cls.apiclient, state='Enabled') + + # 3.2 VPC offering for static routing + cls.vpc_offering = VpcOffering.create( + cls.apiclient, + VPC_OFFERING + ) + cls._cleanup.append(cls.vpc_offering) + cls.vpc_offering.update(cls.apiclient, state='Enabled') + + # 3.3 VPC tier offering for static routing + cls.vpc_network_offering = NetworkOffering.create( + cls.apiclient, + VPC_NETWORK_OFFERING + ) + cls._cleanup.append(cls.vpc_network_offering) + cls.vpc_network_offering.update(cls.apiclient, state='Enabled') + + # 3.4 Network offering for dynamic routing + cls.network_offering_dynamic = NetworkOffering.create( + cls.apiclient, + NETWORK_OFFERING_DYNAMIC + ) + cls._cleanup.append(cls.network_offering_dynamic) + cls.network_offering_dynamic.update(cls.apiclient, state='Enabled') + + # 3.5 VPC Network offering for dynamic routing + cls.vpc_network_offering_dynamic = 
NetworkOffering.create( + cls.apiclient, + VPC_NETWORK_OFFERING_DYNAMIC + ) + cls._cleanup.append(cls.vpc_network_offering_dynamic) + cls.vpc_network_offering_dynamic.update(cls.apiclient, state='Enabled') + + # 4. Create sub-domain + cls.sub_domain = Domain.create( + cls.apiclient, + cls.services["acl"]["domain1"] + ) + cls._cleanup.append(cls.sub_domain) + + # 5. Create regular user + cls.regular_user = Account.create( + cls.apiclient, + cls.services["acl"]["accountD11A"], + domainid=cls.sub_domain.id + ) + cls._cleanup.append(cls.regular_user) + + # 6. Create api clients for regular user + cls.regular_user_user = cls.regular_user.user[0] + cls.regular_user_apiclient = cls.testClient.getUserApiClient( + cls.regular_user_user.username, cls.sub_domain.name + ) + + @classmethod + def tearDownClass(cls): + super(TestIpv4Routing, cls).tearDownClass() + + @classmethod + def message(cls, msg): + cls.logger.debug("====== " + str(datetime.datetime.now()) + " " + msg + " ======") + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.cleanup = [] + + def tearDown(self): + super(TestIpv4Routing, self).tearDown() + + def get_router(self, networkid=None, vpcid=None): + # list router + if vpcid: + list_router_response = list_routers( + self.apiclient, + vpcid=vpcid, + listall="true" + ) + else: + list_router_response = list_routers( + self.apiclient, + networkid=networkid, + listall="true" + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "list routers response should return a valid list" + ) + router = list_router_response[0] + return router + + def run_command_in_router(self, router, command): + # get host of router + hosts = list_hosts( + self.apiclient, + zoneid=router.zoneid, + type='Routing', + state='Up', + id=router.hostid + ) + self.assertEqual( + isinstance(hosts, list), + True, + "Check list host returns a valid list" + ) + host = hosts[0] + + # run command + result = '' + if router.hypervisor.lower() in ('vmware', 
'hyperv'): + result = get_process_status( + self.apiclient.connection.mgtSvr, + 22, + self.apiclient.connection.user, + self.apiclient.connection.passwd, + router.linklocalip, + command, + hypervisor=router.hypervisor + ) + else: + try: + host.user, host.passwd = get_host_credentials(self.config, host.ipaddress) + result = get_process_status( + host.ipaddress, + 22, + host.user, + host.passwd, + router.linklocalip, + command + ) + except KeyError: + self.skipTest("Marvin configuration has no host credentials to check router services") + res = str(result) + self.message("VR command (%s) result: (%s)" % (command, res)) + return res + + def rebootRouter(self, router): + try: + Router.reboot( + self.apiclient, + id=router.id + ) + except Exception as e: + self.fail("Failed to reboot the virtual router: %s, %s" % (router.id, e)) + + def createNetworkAclRule(self, rule): + return NetworkACL.create(self.apiclient, + services=rule, + aclid=test_network_acl.id) + + def createIpv4RoutingFirewallRule(self, rule): + return RoutingFirewallRule.create(self.apiclient, + services=rule, + networkid=test_network.id) + + def verifyNftablesRulesInRouter(self, router, rules): + if router.vpcid: + table = "ip4_acl" + else: + table = "ip4_firewall" + for rule in rules: + cmd = "nft list chain ip %s %s" % (table, rule["chain"]) + res = self.run_command_in_router(router, cmd) + if "exists" not in rule or rule["exists"]: + exists = True + else: + exists = False + if exists and not rule["rule"] in res: + self.fail("The nftables rule (%s) should exist but is not found in the VR !!!" % rule["rule"]) + if not exists and rule["rule"] in res: + self.fail("The nftables rule (%s) should not exist but is found in the VR !!!" 
% rule["rule"]) + self.message("The nftables rules look good so far.") + + def verifyPingFromRouter(self, router, vm, expected=True, retries=2): + while retries > 0: + cmd_ping_vm = "ping -c1 -W1 %s" % vm.ipaddress + try: + result = self.run_command_in_router(router, cmd_ping_vm) + if "0 packets received" in result: + retries = retries - 1 + self.message("No packets received, remaining retries %s" % retries) + if retries > 0: + time.sleep(WAIT_INTERVAL) + else: + self.message("packets are received, looks good") + return + except Exception as ex: + self.fail("Failed to ping vm %s from router %s: %s" % (vm.ipaddress, router.name, ex)) + if retries == 0 and expected: + self.fail("Failed to ping vm %s from router %s, which is expected to work !!!" % (vm.ipaddress, router.name)) + if retries > 0 and not expected: + self.fail("ping vm %s from router %s works, however it is unexpected !!!" % (vm.ipaddress, router.name)) + + def verifyFrrConf(self, router, configs): + cmd = "cat /etc/frr/frr.conf" + res = self.run_command_in_router(router, cmd) + for config in configs: + if "exists" not in config or config["exists"]: + exists = True + else: + exists = False + if exists and not config["config"] in res: + self.fail("The frr config (%s) should exist but is not found in the VR !!!" % config["config"]) + if not exists and config["config"] in res: + self.fail("The frr config (%s) should not exist but is found in the VR !!!" % config["config"]) + self.message("The frr config look good so far.") + + @attr(tags=['advanced'], required_hardware=False) + def test_01_zone_subnet(self): + """ Test for subnet for zone""" + """ + # 1. Create subnet + # 2. List subnet + # 3. Update subnet + # 4. dedicate subnet to domain + # 5. released dedicated subnet + # 6. dedicate subnet to sub-domain/account + # 7. released dedicated subnet + # 8. delete subnet + """ + self.message("Running test_01_zone_subnet") + # 1. 
Create subnet + self.subnet_2 = ZoneIpv4Subnet.create( + self.apiclient, + zoneid=self.zone.id, + subnet=SUBNET_2_PREFIX + ".0/24" + ) + self.cleanup.append(self.subnet_2) + # 2. List subnet + subnets = ZoneIpv4Subnet.list( + self.apiclient, + id=self.subnet_2.id + ) + self.assertEqual( + isinstance(subnets, list), + True, + "List subnets for zone should return a valid list" + ) + self.assertEqual( + len(subnets) == 1, + True, + "The number of subnets for zone (%s) should be equal to 1" % (len(subnets)) + ) + self.assertEqual( + subnets[0].subnet == SUBNET_2_PREFIX + ".0/24", + True, + "The subnet of subnet for zone (%s) should be equal to %s" % (subnets[0].subnet, SUBNET_2_PREFIX + ".0/24") + ) + # 3. Update subnet + self.subnet_2.update( + self.apiclient, + subnet=SUBNET_2_PREFIX + ".0/25" + ) + subnets = ZoneIpv4Subnet.list( + self.apiclient, + id=self.subnet_2.id + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1 and subnets[0].subnet == SUBNET_2_PREFIX + ".0/25", + True, + "The subnet of subnet for zone should be equal to %s" % (SUBNET_2_PREFIX + ".0/25") + ) + # 4. dedicate subnet to domain + ZoneIpv4Subnet.dedicate( + self.apiclient, + id=self.subnet_2.id, + domainid=self.domain.id + ) + subnets = ZoneIpv4Subnet.list( + self.apiclient, + id=self.subnet_2.id + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1 and subnets[0].domainid == self.domain.id, + True, + "The subnet should be dedicated to domain %s" % self.domain.id + ) + # 5. released dedicated subnet + self.subnet_2.release( + self.apiclient + ) + subnets = ZoneIpv4Subnet.list( + self.apiclient, + id=self.subnet_2.id + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1 and not subnets[0].domainid, + True, + "The subnet should not be dedicated to domain %s" % self.domain.id + ) + # 6. 
dedicate subnet to sub-domain/account + ZoneIpv4Subnet.dedicate( + self.apiclient, + id=self.subnet_2.id, + domainid=self.sub_domain.id, + account=self.regular_user.name + ) + subnets = ZoneIpv4Subnet.list( + self.apiclient, + id=self.subnet_2.id + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1 + and subnets[0].domainid == self.sub_domain.id and subnets[0].account == self.regular_user.name, + True, + "The subnet should be dedicated to account %s" % self.regular_user.name + ) + # 7. released dedicated subnet + self.subnet_2.release( + self.apiclient + ) + subnets = ZoneIpv4Subnet.list( + self.apiclient, + id=self.subnet_2.id + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1 and not subnets[0].domainid, + True, + "The subnet should not be dedicated to account %s" % self.regular_user.name + ) + # 8. delete subnet + self.subnet_2.delete( + self.apiclient + ) + self.cleanup.remove(self.subnet_2) + + @attr(tags=['advanced'], required_hardware=False) + def test_02_create_network_routed_mode_with_specified_cidr(self): + """ Test for guest network with specified cidr""" + """ + # 1. Create Isolated network + # 2. List subnet for network by subnet + # 3. Delete the network + # 4. List subnet for network by subnet. the subnet should be gone as well + """ + self.message("Running test_02_create_network_routed_mode_with_specified_cidr") + + # 1. Create Isolated network + isolated_network = Network.create( + self.apiclient, + self.services["network"], + gateway=NETWORK_CIDR_PREFIX + ".1", + netmask="255.255.255.0", + networkofferingid=self.network_offering_isolated.id, + zoneid=self.zone.id + ) + self.cleanup.append(isolated_network) + + # 2. 
List subnet for network by subnet + subnets = Ipv4SubnetForGuestNetwork.list( + self.apiclient, + subnet=NETWORK_CIDR_PREFIX + ".0/24" + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1 + and subnets[0].subnet == NETWORK_CIDR_PREFIX + ".0/24" and subnets[0].state == "Allocated", + True, + "The subnet should be added for network %s" % isolated_network.name + ) + + # 3. Delete the network + isolated_network.delete(self.apiclient) + self.cleanup.remove(isolated_network) + + # 4. List subnet for network by subnet. the subnet should be gone as well + network_cidr = subnets[0].subnet + subnets = Ipv4SubnetForGuestNetwork.list( + self.apiclient, + subnet=network_cidr + ) + self.assertEqual( + not isinstance(subnets, list) or len(subnets) == 0, + True, + "The subnet %s should be removed for network %s" % (network_cidr, isolated_network.name) + ) + + @attr(tags=['advanced'], required_hardware=False) + def test_03_create_subnets_for_guest_network(self): + """ Test for subnets for guest network with cidr/cidrsize""" + """ + # 1. Create subnet with cidr for guest network + # 2. List subnets for network + # 3. delete subnet for network + + # 4. Create subnet with cidrsize + # 5. List subnet for network + # 6. delete subnet for network + """ + self.message("Running test_03_create_subnets_for_guest_network") + + # 1. Create subnet with cidr for guest network + subnet_network_1 = Ipv4SubnetForGuestNetwork.create( + self.apiclient, + parentid=self.subnet_1.id, + subnet=SUBNET_1_PREFIX + ".0/26" + ) + self.cleanup.append(subnet_network_1) + + # 2. List subnets for network + subnets = Ipv4SubnetForGuestNetwork.list( + self.apiclient, + subnet=subnet_network_1.subnet + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1, + True, + "The subnet should be created for subnet_network_1 %s" % subnet_network_1.subnet + ) + + # 3. delete subnet for network + subnet_network_1.delete(self.apiclient) + self.cleanup.remove(subnet_network_1) + + # 4. 
Create subnet with cidrsize + subnet_network_2 = Ipv4SubnetForGuestNetwork.create( + self.apiclient, + parentid=self.subnet_1.id, + cidrsize=26 + ) + self.cleanup.append(subnet_network_2) + # 5. List subnet for network + subnets = Ipv4SubnetForGuestNetwork.list( + self.apiclient, + subnet=subnet_network_2.subnet + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1, + True, + "The subnet should be created for subnet_network_2 %s" % subnet_network_2.subnet + ) + + # 6. delete subnet for network + subnet_network_2.delete(self.apiclient) + self.cleanup.remove(subnet_network_2) + + @attr(tags=['advanced'], required_hardware=False) + def test_04_create_isolated_network_routed_mode_with_cidrsize(self): + """ Test for subnet and guest network with cidrsize""" + """ + # 1. Create Isolated network with cidrsize + # 2. List subnet for network by networkid + # 3. Delete the network + # 4. List subnet for network by networkid, it should be removed + """ + self.message("Running test_04_create_isolated_network_routed_mode_with_cidrsize") + + # 1. Create Isolated network with cidrsize + isolated_network = Network.create( + self.apiclient, + self.services["network"], + networkofferingid=self.network_offering_isolated.id, + zoneid=self.zone.id, + cidrsize=26 + ) + self.cleanup.append(isolated_network) + + # 2. List subnet for network by networkid + subnets = Ipv4SubnetForGuestNetwork.list( + self.apiclient, + networkid=isolated_network.id + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1 + and subnets[0].networkid == isolated_network.id and subnets[0].state == "Allocated", + True, + "The subnet should be created for isolated_network %s" % isolated_network.name + ) + + # 3. Delete the network + isolated_network.delete(self.apiclient) + self.cleanup.remove(isolated_network) + + # 4. 
List subnet for network by network cidr, it should be removed + network_cidr = subnets[0].subnet + subnets = Ipv4SubnetForGuestNetwork.list( + self.apiclient, + subnet=network_cidr + ) + self.assertEqual( + not isinstance(subnets, list) or len(subnets) == 0, + True, + "The subnet should be removed for isolated_network %s" % isolated_network.name + ) + + @attr(tags=['advanced'], required_hardware=False) + def test_05_create_vpc_routed_mode_with_cidrsize(self): + """ Test for Routed VPC with cidrsize""" + """ + # 1. Create VPC with cidrsize + # 2. List subnet for network by vpcid + # 3. Delete the VPC + # 4. List subnet for network by vpcid, it should be removed + """ + self.message("Running test_05_create_vpc_routed_mode_with_cidrsize") + + # 1. Create VPC with cidrsize + del self.services["vpc"]["cidr"] + vpc = VPC.create(self.apiclient, + self.services["vpc"], + vpcofferingid=self.vpc_offering.id, + zoneid=self.zone.id, + cidrsize=26, + start=False + ) + self.cleanup.append(vpc) + + # 2. List subnet for network by networkid + subnets = Ipv4SubnetForGuestNetwork.list( + self.apiclient, + vpcid=vpc.id + ) + self.assertEqual( + isinstance(subnets, list) and len(subnets) == 1 + and subnets[0].vpcid == vpc.id and subnets[0].state == "Allocated", + True, + "The subnet should be created for vpc %s" % vpc.name + ) + + # 3. Delete the VPC + vpc.delete(self.apiclient) + self.cleanup.remove(vpc) + + # 4. List subnet for network by vpc cidr, it should be removed + vpc_cidr = subnets[0].subnet + subnets = Ipv4SubnetForGuestNetwork.list( + self.apiclient, + subnet=vpc_cidr + ) + self.assertEqual( + not isinstance(subnets, list) or len(subnets) == 0, + True, + "The subnet should be removed for vpc %s" % vpc.name + ) + + @attr(tags=['advanced'], required_hardware=False) + def test_06_isolated_network_with_routed_mode(self): + """ Test for Isolated Network with Routed mode""" + """ + # 1. Create Isolated network + # 2. 
Create VM in the network + """ + self.message("Running test_06_isolated_network_with_routed_mode") + + # 1. Create Isolated network + global test_network + test_network = Network.create( + self.apiclient, + self.services["network"], + networkofferingid=self.network_offering_isolated.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + gateway=NETWORK_CIDR_PREFIX + ".1", + netmask="255.255.255.0" + ) + self._cleanup.append(test_network) + + # 2. Create VM in the network + global test_network_vm + test_network_vm = VirtualMachine.create( + self.regular_user_apiclient, + self.services["virtual_machine"], + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + networkids=test_network.id, + serviceofferingid=self.service_offering.id, + templateid=self.template.id) + self._cleanup.append(test_network_vm) + + @attr(tags=['advanced'], required_hardware=False) + def test_07_vpc_and_tier_with_routed_mode(self): + """ Test for VPC/tier with Routed mode""" + """ + # 1. Create VPC + # 2. Create Network ACL (egress = Deny, ingress = Deny) + # 3. Create VPC tier with Network ACL in the VPC + # 4. Create VM in the VPC tier + """ + self.message("Running test_07_vpc_and_tier_with_routed_mode") + + # 1. Create VPC + self.services["vpc"]["cidr"] = VPC_CIDR_PREFIX + ".0.0/22" + global test_vpc + test_vpc = VPC.create(self.apiclient, + self.services["vpc"], + vpcofferingid=self.vpc_offering.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + account=self.regular_user.name, + start=False + ) + self._cleanup.append(test_vpc) + + # 2. Create Network ACL (egress = Deny, ingress = Deny) + global test_network_acl + test_network_acl = NetworkACLList.create(self.apiclient, + services={}, + name="test-network-acl", + description="test-network-acl", + vpcid=test_vpc.id + ) + + # 3. 
Create VPC tier with Network ACL in the VPC + global test_vpc_tier + test_vpc_tier = Network.create(self.regular_user_apiclient, + self.services["network"], + networkofferingid=self.vpc_network_offering.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + vpcid=test_vpc.id, + gateway=VPC_CIDR_PREFIX + ".1.1", + netmask="255.255.255.0", + aclid=test_network_acl.id + ) + self._cleanup.append(test_vpc_tier) + + # 4. Create VM in the VPC tier + global test_vpc_vm + test_vpc_vm = VirtualMachine.create( + self.regular_user_apiclient, + self.services["virtual_machine"], + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + networkids=test_vpc_tier.id, + serviceofferingid=self.service_offering.id, + templateid=self.template.id) + self._cleanup.append(test_vpc_vm) + + @attr(tags=['advanced'], required_hardware=False) + def test_08_vpc_and_tier_failed_cases(self): + """ Test for VPC/tier with Routed mode (some failed cases)""" + """ + # 1. create VPC with Routed mode + # 2. create network offering with NATTED mode, create vpc tier, it should fail + # 3. create vpc tier not in the vpc cidr, it should fail + """ + + self.message("Running test_08_vpc_and_tier_failed_cases") + + # 1. Create VPC + self.services["vpc"]["cidr"] = VPC_CIDR_PREFIX + ".8.0/22" + test_vpc_2 = VPC.create(self.apiclient, + self.services["vpc"], + vpcofferingid=self.vpc_offering.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + account=self.regular_user.name, + start=False + ) + self.cleanup.append(test_vpc_2) + + # 2. 
create network offering with NATTED mode, create vpc tier, it should fail + nw_offering_isolated_vpc = NetworkOffering.create( + self.apiclient, + self.services["nw_offering_isolated_vpc"] + ) + self.cleanup.append(nw_offering_isolated_vpc) + nw_offering_isolated_vpc.update(self.apiclient, state='Enabled') + try: + test_vpc_tier_2 = Network.create(self.regular_user_apiclient, + self.services["network"], + networkofferingid=nw_offering_isolated_vpc.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + vpcid=test_vpc_2.id, + gateway=VPC_CIDR_PREFIX + ".1.1", + netmask="255.255.255.0" + ) + self.cleanup.append(test_vpc_tier_2) + self.fail("Created vpc network successfully, but expected to fail") + except Exception as ex: + self.message("Failed to create vpc network due to %s, which is expected behaviour" % ex) + + # 3. create vpc tier not in the vpc cidr, it should fail + try: + test_vpc_tier_3 = Network.create(self.regular_user_apiclient, + self.services["network"], + networkofferingid=self.vpc_network_offering.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + vpcid=test_vpc_2.id, + gateway=VPC_CIDR_PREFIX + ".31.1", + netmask="255.255.255.0" + ) + self.cleanup.append(test_vpc_tier_3) + self.fail("Created vpc network successfully, but expected to fail") + except Exception as ex: + self.message("Failed to create vpc network due to %s, which is expected behaviour" % ex) + + @attr(tags=['advanced'], required_hardware=False) + def test_09_connectivity_between_network_and_vpc_tier(self): + """ Test for connectivity between VMs in the Isolated Network and VPC/tier""" + """ + # 0. Get static routes of Network/VPC + # 1. Add static routes in VRs manually + + # 2. Test VM2 in VR1-Network (ping/ssh should fail) + # 3. Test VM1 in VR2-VPC (ping/ssh should fail) + + # 4. Create Ingress rules in Network ACL for VPC + # 5. Create Egress rules in Network ACL for VPC + # 6. 
Test VM2 in VR1-Network (ping/ssh should succeed) + # 7. Test VM1 in VR2-VPC (ping/ssh should fail) + + # 8. Create IPv4 firewalls for Isolated network + # 9. Test VM2 in VR1-Network (ping/ssh should succeed) + # 10. Test VM1 in VR2-VPC (ping/ssh should succeed) + + # 11. Delete Network ACL rules for VPC + # 12. Delete IPv4 firewall rules for Network + # 13. Test VM2 in VR1-Network (ping/ssh should fail) + # 14. Test VM1 in VR2-VPC (ping/ssh should fail) + + """ + self.message("Running test_09_connectivity_between_network_and_vpc_tier") + + # 0. Get static routes of Network/VPC + network_ip4routes = [] + if test_network: + network_ip4routes = Network.list( + self.apiclient, + id=test_network.id, + listall=True + )[0].ip4routes + else: + self.skipTest("test_network is not created") + + vpc_ip4routes = [] + if test_vpc: + vpc_ip4routes = VPC.list( + self.apiclient, + id=test_vpc.id, + listall=True + )[0].ip4routes + else: + self.skipTest("test_vpc is not created") + + network_router = self.get_router(networkid=test_network.id) + vpc_router = self.get_router(vpcid=test_vpc.id) + + # Test VM1 in VR1-Network (wait until ping works) + self.verifyPingFromRouter(network_router, test_network_vm, retries=MAX_RETRIES) + # Test VM2 in VR2-VPC (wait until ping works) + self.verifyPingFromRouter(vpc_router, test_vpc_vm, retries=MAX_RETRIES) + + # 1. Add static routes in VRs manually + if not network_router or not vpc_router: + self.skipTest("network_router (%s) or vpc_router (%s) does not exist" % (network_router, vpc_router)) + for ip4route in network_ip4routes: + self.run_command_in_router(vpc_router, "ip route add %s via %s" % (ip4route.subnet, ip4route.gateway)) + for ip4route in vpc_ip4routes: + self.run_command_in_router(network_router, "ip route add %s via %s" % (ip4route.subnet, ip4route.gateway)) + + # 2. Test VM2 in VR1-Network (ping/ssh should fail) + self.verifyPingFromRouter(network_router, test_vpc_vm, expected=False) + # 3. 
Test VM1 in VR2-VPC (ping/ssh should fail) + self.verifyPingFromRouter(vpc_router, test_network_vm, expected=False) + + vpc_router_rules = [{"chain": "FORWARD", + "rule": "ip daddr %s jump eth2_ingress_policy" % test_vpc_tier.cidr}, + {"chain": "FORWARD", + "rule": "ip saddr %s jump eth2_egress_policy" % test_vpc_tier.cidr}] + vpc_acl_rules = [] + # 4. Create Ingress rules in Network ACL for VPC + rule = {} + rule["traffictype"] = "Ingress" + rule["cidrlist"] = test_network.cidr + rule["protocol"] = "icmp" + rule["icmptype"] = -1 + rule["icmpcode"] = -1 + vpc_acl_rules.append(self.createNetworkAclRule(rule)) + vpc_router_rules.append({"chain": "eth2_ingress_policy", + "rule": "ip saddr %s icmp type %s accept" % (test_network.cidr, ICMPv4_ALL_TYPES)}) + self.verifyNftablesRulesInRouter(vpc_router, vpc_router_rules) + + rule = {} + rule["traffictype"] = "Ingress" + rule["cidrlist"] = test_network.cidr + rule["protocol"] = "tcp" + rule["startport"] = 22 + rule["endport"] = 22 + vpc_acl_rules.append(self.createNetworkAclRule(rule)) + vpc_router_rules.append({"chain": "eth2_ingress_policy", + "rule": "ip saddr %s tcp dport 22 accept" % test_network.cidr}) + self.verifyNftablesRulesInRouter(vpc_router, vpc_router_rules) + + rule = {} + rule["traffictype"] = "Ingress" + rule["cidrlist"] = network_router.publicip + "/32" + rule["protocol"] = "icmp" + rule["icmptype"] = -1 + rule["icmpcode"] = -1 + vpc_acl_rules.append(self.createNetworkAclRule(rule)) + vpc_router_rules.append({"chain": "eth2_ingress_policy", + "rule": "ip saddr %s icmp type %s accept" % (network_router.publicip, ICMPv4_ALL_TYPES)}) + self.verifyNftablesRulesInRouter(vpc_router, vpc_router_rules) + + # 5. 
Create Egress rules in Network ACL for VPC + rule = {} + rule["traffictype"] = "Egress" + rule["protocol"] = "icmp" + rule["icmptype"] = -1 + rule["icmpcode"] = -1 + vpc_acl_rules.append(self.createNetworkAclRule(rule)) + vpc_router_rules.append({"chain": "eth2_egress_policy", + "rule": "ip daddr 0.0.0.0/0 icmp type %s accept" % ICMPv4_ALL_TYPES}) + self.verifyNftablesRulesInRouter(vpc_router, vpc_router_rules) + + # 6. Test VM2 in VR1-Network (ping/ssh should succeed) + self.verifyPingFromRouter(network_router, test_vpc_vm, expected=True) + # 7. Test VM1 in VR2-VPC (ping/ssh should fail) + self.verifyPingFromRouter(vpc_router, test_network_vm, expected=False) + + network_router_rules = [{"chain": "FORWARD", + "rule": "ip daddr %s jump fw_chain_ingress" % test_network.cidr}, + {"chain": "FORWARD", + "rule": "ip saddr %s jump fw_chain_egress" % test_network.cidr}] + network_routing_firewall_rules = [] + # 8. Create IPv4 firewalls for Isolated network + rule = {} + rule["traffictype"] = "Ingress" + rule["cidrlist"] = test_vpc.cidr + rule["protocol"] = "icmp" + rule["icmptype"] = -1 + rule["icmpcode"] = -1 + network_routing_firewall_rules.append(self.createIpv4RoutingFirewallRule(rule)) + network_router_rules.append({"chain": "fw_chain_ingress", + "rule": "ip saddr %s ip daddr 0.0.0.0/0 icmp type %s accept" % (test_vpc.cidr, ICMPv4_ALL_TYPES)}) + self.verifyNftablesRulesInRouter(network_router, network_router_rules) + + rule = {} + rule["traffictype"] = "Ingress" + rule["cidrlist"] = test_vpc.cidr + rule["protocol"] = "tcp" + rule["startport"] = 22 + rule["endport"] = 22 + network_routing_firewall_rules.append(self.createIpv4RoutingFirewallRule(rule)) + network_router_rules.append({"chain": "fw_chain_ingress", + "rule": "ip saddr %s ip daddr 0.0.0.0/0 tcp dport 22 accept" % test_vpc.cidr}) + self.verifyNftablesRulesInRouter(network_router, network_router_rules) + + rule = {} + rule["traffictype"] = "Ingress" + rule["cidrlist"] = vpc_router.publicip + "/32" + 
rule["protocol"] = "icmp" + rule["icmptype"] = -1 + rule["icmpcode"] = -1 + network_routing_firewall_rules.append(self.createIpv4RoutingFirewallRule(rule)) + network_router_rules.append({"chain": "fw_chain_ingress", + "rule": "ip saddr %s ip daddr 0.0.0.0/0 icmp type %s accept" % (vpc_router.publicip, ICMPv4_ALL_TYPES)}) + self.verifyNftablesRulesInRouter(network_router, network_router_rules) + + # 9. Test VM2 in VR1-Network (ping/ssh should succeed) + self.verifyPingFromRouter(network_router, test_vpc_vm, expected=True) + # 10. Test VM1 in VR2-VPC (ping/ssh should succeed) + self.verifyPingFromRouter(vpc_router, test_network_vm, expected=True) + + # 11. Delete Network ACL rules for VPC + for rule in vpc_acl_rules: + rule.delete(self.apiclient) + vpc_router_rules[2] = {"chain": "eth2_ingress_policy", + "rule": "ip saddr %s icmp type %s accept" % (test_network.cidr, ICMPv4_ALL_TYPES), + "exists": False} + vpc_router_rules[3] = {"chain": "eth2_ingress_policy", + "rule": "ip saddr %s tcp dport 22 accept" % test_network.cidr, + "exists": False} + vpc_router_rules[4] = {"chain": "eth2_egress_policy", + "rule": "ip daddr 0.0.0.0/0 icmp type %s accept" % ICMPv4_ALL_TYPES, + "exists": False} + vpc_router_rules[5] = {"chain": "eth2_ingress_policy", + "rule": "ip saddr %s icmp type %s accept" % (network_router.publicip, ICMPv4_ALL_TYPES), + "exists": False} + self.verifyNftablesRulesInRouter(vpc_router, vpc_router_rules) + + # 12. 
Delete IPv4 firewall rules for Network + for rule in network_routing_firewall_rules: + rule.delete(self.apiclient) + network_router_rules[2] = {"chain": "fw_chain_ingress", + "rule": "ip saddr %s ip daddr 0.0.0.0/0 icmp type %s accept" % (test_vpc.cidr, ICMPv4_ALL_TYPES), + "exists": False} + network_router_rules[3] = {"chain": "fw_chain_ingress", + "rule": "ip saddr %s ip daddr 0.0.0.0/0 tcp dport 22 accept" % test_vpc.cidr, + "exists": False} + network_router_rules[4] = {"chain": "fw_chain_ingress", + "rule": "ip saddr %s ip daddr 0.0.0.0/0 icmp type %s accept" % (vpc_router.publicip, ICMPv4_ALL_TYPES), + "exists": False} + self.verifyNftablesRulesInRouter(network_router, network_router_rules) + + # 13. Test VM2 in VR1-Network (ping/ssh should fail) + self.verifyPingFromRouter(network_router, test_vpc_vm, expected=False) + # 14. Test VM1 in VR2-VPC (ping/ssh should fail) + self.verifyPingFromRouter(vpc_router, test_network_vm, expected=False) + + @attr(tags=['advanced'], required_hardware=False) + def test_10_bgp_peers(self): + """ Test for BGP peers""" + """ + # 1. Create bgppeer + # 2. List bgppeer + # 3. Update bgppeer + # 4. dedicate bgppeer to domain + # 5. released dedicated bgppeer + # 6. dedicate bgppeer to sub-domain/account + # 7. released dedicated bgppeer + # 8. delete bgppeer + """ + self.message("Running test_10_bgp_peers") + # 1. Create bgp peer + bgppeer_1 = BgpPeer.create( + self.apiclient, + zoneid=self.zone.id, + asnumber=ASN_1, + ipaddress=IP4_ADDR_1 + ) + self.cleanup.append(bgppeer_1) + # 2. 
List bgp peer + bgppeers = BgpPeer.list( + self.apiclient, + id=bgppeer_1.id + ) + self.assertEqual( + isinstance(bgppeers, list), + True, + "List bgppeers for zone should return a valid list" + ) + self.assertEqual( + len(bgppeers) == 1, + True, + "The number of bgp peers (%s) should be equal to 1" % (len(bgppeers)) + ) + self.assertEqual( + bgppeers[0].asnumber == ASN_1 and bgppeers[0].ipaddress == IP4_ADDR_1, + True, + "The asnumber of bgp peer (%s) should be equal to %s, the ip address (%s) should be %s" + % (bgppeers[0].asnumber, ASN_1, bgppeers[0].ipaddress, IP4_ADDR_1) + ) + # 3. Update bgp peer + bgppeer_1.update( + self.apiclient, + asnumber=ASN_2, + ipaddress=IP4_ADDR_2 + ) + bgppeers = BgpPeer.list( + self.apiclient, + id=bgppeer_1.id + ) + self.assertEqual( + isinstance(bgppeers, list) and len(bgppeers) == 1 + and bgppeers[0].asnumber == ASN_2 and bgppeers[0].ipaddress == IP4_ADDR_2, + True, + "The asnumber of bgp peer (%s) should be equal to %s, the ip address (%s) should be %s" + % (bgppeers[0].asnumber, ASN_2, bgppeers[0].ipaddress, IP4_ADDR_2) + ) + # 4. dedicate bgp peer to domain + BgpPeer.dedicate( + self.apiclient, + id=bgppeer_1.id, + domainid=self.domain.id + ) + bgppeers = BgpPeer.list( + self.apiclient, + id=bgppeer_1.id + ) + self.assertEqual( + isinstance(bgppeers, list) and len(bgppeers) == 1 and bgppeers[0].domainid == self.domain.id, + True, + "The bgppeer should be dedicated to domain %s" % self.domain.id + ) + # 5. released dedicated bgp peer + bgppeer_1.release( + self.apiclient + ) + bgppeers = BgpPeer.list( + self.apiclient, + id=bgppeer_1.id + ) + self.assertEqual( + isinstance(bgppeers, list) and len(bgppeers) == 1 and not bgppeers[0].domainid, + True, + "The bgp peer should not be dedicated to domain %s" % self.domain.id + ) + # 6. 
dedicate bgp peer to sub-domain/account + BgpPeer.dedicate( + self.apiclient, + id=bgppeer_1.id, + domainid=self.sub_domain.id, + account=self.regular_user.name + ) + bgppeers = BgpPeer.list( + self.apiclient, + id=bgppeer_1.id + ) + self.assertEqual( + isinstance(bgppeers, list) and len(bgppeers) == 1 + and bgppeers[0].domainid == self.sub_domain.id and bgppeers[0].account == self.regular_user.name, + True, + "The bgp peer should be dedicated to account %s" % self.regular_user.name + ) + # 7. released dedicated bgp peer + bgppeer_1.release( + self.apiclient + ) + bgppeers = BgpPeer.list( + self.apiclient, + id=bgppeer_1.id + ) + self.assertEqual( + isinstance(bgppeers, list) and len(bgppeers) == 1 and not bgppeers[0].domainid, + True, + "The bgppeer should not be dedicated to account %s" % self.regular_user.name + ) + # 8. delete bgp peer + bgppeer_1.delete( + self.apiclient + ) + self.cleanup.remove(bgppeer_1) + + @attr(tags=['advanced'], required_hardware=False) + def test_11_isolated_network_with_dynamic_routed_mode(self): + """ Test for Isolated Network with Dynamic Routed mode""" + """ + # 1. Create Isolated network with bgp_peer_1 + # 2. Create VM in the network + # 3. Verify frr.conf in network VR + # 4. Update network BGP peers (to bgp_peer_1 and bgp_peer_2) + # 5. Verify frr.conf in network VR + # 6. Reboot VR + # 7. Verify frr.conf in network VR + # 8. Update network BGP peers (to bgppeer_2) + # 9. Verify frr.conf in network VR + # 10. Update network BGP peers (to null) + # 11. Verify frr.conf in network VR + """ + self.message("Running test_11_isolated_network_with_dynamic_routed_mode") + + # 1. Create bgp peers + bgppeer_1 = BgpPeer.create( + self.apiclient, + zoneid=self.zone.id, + asnumber=ASN_1, + ipaddress=IP4_ADDR_1, + password=PASSWORD_1 + ) + self.cleanup.append(bgppeer_1) + + # 1. 
Create Isolated network with Dynamic routing + test_network_dynamic = Network.create( + self.apiclient, + self.services["network"], + networkofferingid=self.network_offering_dynamic.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + gateway=NETWORK_CIDR_PREFIX_DYNAMIC + ".1", + netmask="255.255.255.0", + bgppeerids=bgppeer_1.id + ) + self.cleanup.append(test_network_dynamic) + + # 2. Create VM in the network + test_network_dynamic_vm = VirtualMachine.create( + self.regular_user_apiclient, + self.services["virtual_machine"], + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + networkids=test_network_dynamic.id, + serviceofferingid=self.service_offering.id, + templateid=self.template.id) + self.cleanup.append(test_network_dynamic_vm) + + network_router = self.get_router(networkid=test_network_dynamic.id) + + # 3. Verify frr.conf in network VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": True}, + {"config": "network %s" % test_network_dynamic.cidr, + "exists": True}] + self.verifyFrrConf(network_router, frr_configs) + + # 4. Update network BGP peers (to bgp_peer_1 and bgp_peer_2) + bgppeer_2 = BgpPeer.create( + self.apiclient, + zoneid=self.zone.id, + asnumber=ASN_2, + ipaddress=IP4_ADDR_2, + password=PASSWORD_2 + ) + self.cleanup.append(bgppeer_2) + + test_network_dynamic.changeBgpPeers( + self.apiclient, + bgppeerids=[bgppeer_1.id, bgppeer_2.id] + ) + + # 5. 
Verify frr.conf in network VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": True}, + {"config": "neighbor %s remote-as %s" % (bgppeer_2.ipaddress, bgppeer_2.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_2.ipaddress, PASSWORD_2), + "exists": True}, + {"config": "network %s" % test_network_dynamic.cidr, + "exists": True}] + self.verifyFrrConf(network_router, frr_configs) + + # 6. Reboot VR + self.rebootRouter(network_router) + + # 7. Verify frr.conf in network VR + network_router = self.get_router(networkid=test_network_dynamic.id) + self.verifyFrrConf(network_router, frr_configs) + + # 8. Update network BGP peers (to bgppeer_2) + test_network_dynamic.changeBgpPeers( + self.apiclient, + bgppeerids=[bgppeer_2.id] + ) + + # 9. Verify frr.conf in network VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": False}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": False}, + {"config": "neighbor %s remote-as %s" % (bgppeer_2.ipaddress, bgppeer_2.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_2.ipaddress, PASSWORD_2), + "exists": True}, + {"config": "network %s" % test_network_dynamic.cidr, + "exists": True}] + self.verifyFrrConf(network_router, frr_configs) + + # 10. Update network BGP peers (to null) + test_network_dynamic.changeBgpPeers( + self.apiclient, + bgppeerids=[] + ) + + # 11. 
Verify frr.conf in network VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": True}, + {"config": "neighbor %s remote-as %s" % (bgppeer_2.ipaddress, bgppeer_2.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_2.ipaddress, PASSWORD_2), + "exists": True}, + {"config": "network %s" % test_network_dynamic.cidr, + "exists": True}] + self.verifyFrrConf(network_router, frr_configs) + + @attr(tags=['advanced'], required_hardware=False) + def test_12_vpc_and_tier_with_dynamic_routed_mode(self): + """ Test for VPC/tier with Dynamic Routed mode""" + """ + # 1. Create bgp peers + # 2. Create VPC + # 3. Create Network ACL (egress = Deny, ingress = Deny) + # 4. Create VPC tier with Network ACL in the VPC + # 5. Create VM in the VPC tier + # 6. Verify frr.conf in VPC VR + # 7. Update network BGP peers (to bgp_peer_1 and bgp_peer_2) + # 8. Verify frr.conf in VPC VR + # 9. Create VPC tier-2 with Network ACL in the VPC + # 10. Create VM-2 in the VPC tier-2 + # 11. Verify frr.conf in VPC VR + # 12. Reboot VPC VR + # 13. Verify frr.conf in VPC VR + # 14. Update network BGP peers (to bgppeer_2) + # 15. Verify frr.conf in VPC VR + # 16. Update network BGP peers (to null) + # 17. Verify frr.conf in VPC VR + """ + self.message("Running test_12_vpc_and_tier_with_dynamic_routed_mode") + + # 1. 
Create bgp peers + bgppeer_1 = BgpPeer.create( + self.apiclient, + zoneid=self.zone.id, + asnumber=ASN_1, + ipaddress=IP4_ADDR_1, + password=PASSWORD_1 + ) + self.cleanup.append(bgppeer_1) + + # 2.1 VPC offering for dynamic routing + vpc_offering_dynamic = VpcOffering.create( + self.apiclient, + VPC_OFFERING_DYNAMIC + ) + self.cleanup.append(vpc_offering_dynamic) + vpc_offering_dynamic.update(self.apiclient, state='Enabled') + + # 2.2 Create VPC + self.services["vpc"]["cidr"] = VPC_CIDR_PREFIX + ".8.0/22" + test_vpc_dynamic = VPC.create(self.apiclient, + self.services["vpc"], + vpcofferingid=vpc_offering_dynamic.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + account=self.regular_user.name, + start=False, + bgppeerids=bgppeer_1.id + ) + self.cleanup.append(test_vpc_dynamic) + + # 3. Create Network ACL (egress = Deny, ingress = Deny) + test_network_acl_dynamic = NetworkACLList.create(self.apiclient, + services={}, + name="test-network-acl-dynamic", + description="test-network-acl-dynamic", + vpcid=test_vpc_dynamic.id + ) + + # 4. Create VPC tier with Network ACL in the VPC + self.services["network"]["name"] = "test_vpc_tier_dynamic_1" + test_vpc_tier_dynamic_1 = Network.create(self.regular_user_apiclient, + self.services["network"], + networkofferingid=self.vpc_network_offering_dynamic.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + vpcid=test_vpc_dynamic.id, + gateway=VPC_CIDR_PREFIX + ".8.1", + netmask="255.255.255.0", + aclid=test_network_acl_dynamic.id + ) + self.cleanup.append(test_vpc_tier_dynamic_1) + + # 5. 
Create VM in the VPC tier + test_vpc_vm_dynamic_1 = VirtualMachine.create( + self.regular_user_apiclient, + self.services["virtual_machine"], + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + networkids=test_vpc_tier_dynamic_1.id, + serviceofferingid=self.service_offering.id, + templateid=self.template.id) + self.cleanup.append(test_vpc_vm_dynamic_1) + + vpc_router = self.get_router(vpcid=test_vpc_dynamic.id) + + # 6. Verify frr.conf in VPC VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": True}, + {"config": "network %s" % test_vpc_tier_dynamic_1.cidr, + "exists": True}] + self.verifyFrrConf(vpc_router, frr_configs) + + # 7. Update VPC BGP peers (to bgp_peer_1 and bgp_peer_2) + bgppeer_2 = BgpPeer.create( + self.apiclient, + zoneid=self.zone.id, + asnumber=ASN_2, + ipaddress=IP4_ADDR_2, + password=PASSWORD_2 + ) + self.cleanup.append(bgppeer_2) + + test_vpc_dynamic.changeBgpPeers( + self.apiclient, + bgppeerids=[bgppeer_1.id, bgppeer_2.id] + ) + + # 8. Verify frr.conf in VPC VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": True}, + {"config": "neighbor %s remote-as %s" % (bgppeer_2.ipaddress, bgppeer_2.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_2.ipaddress, PASSWORD_2), + "exists": True}, + {"config": "network %s" % test_vpc_tier_dynamic_1.cidr, + "exists": True}] + self.verifyFrrConf(vpc_router, frr_configs) + + # 9. 
Create VPC tier-2 with Network ACL in the VPC + self.services["network"]["name"] = "test_vpc_tier_dynamic_2" + test_vpc_tier_dynamic_2 = Network.create(self.regular_user_apiclient, + self.services["network"], + networkofferingid=self.vpc_network_offering_dynamic.id, + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + vpcid=test_vpc_dynamic.id, + gateway=VPC_CIDR_PREFIX + ".9.1", + netmask="255.255.255.0", + aclid=test_network_acl_dynamic.id + ) + self.cleanup.append(test_vpc_tier_dynamic_2) + + # 10. Create VM-2 in the VPC tier-2 + test_vpc_vm_dynamic_2 = VirtualMachine.create( + self.regular_user_apiclient, + self.services["virtual_machine"], + zoneid=self.zone.id, + domainid=self.sub_domain.id, + accountid=self.regular_user.name, + networkids=test_vpc_tier_dynamic_2.id, + serviceofferingid=self.service_offering.id, + templateid=self.template.id) + self.cleanup.append(test_vpc_vm_dynamic_2) + + # 11. Verify frr.conf in VPC VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": True}, + {"config": "neighbor %s remote-as %s" % (bgppeer_2.ipaddress, bgppeer_2.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_2.ipaddress, PASSWORD_2), + "exists": True}, + {"config": "network %s" % test_vpc_tier_dynamic_1.cidr, + "exists": True}, + {"config": "network %s" % test_vpc_tier_dynamic_2.cidr, + "exists": True}] + self.verifyFrrConf(vpc_router, frr_configs) + + # 12. Reboot VPC VR + self.rebootRouter(vpc_router) + + # 13. Verify frr.conf in VPC VR + vpc_router = self.get_router(vpcid=test_vpc_dynamic.id) + self.verifyFrrConf(vpc_router, frr_configs) + + # 14. Update VPC BGP peers (to bgppeer_2) + test_vpc_dynamic.changeBgpPeers( + self.apiclient, + bgppeerids=[bgppeer_2.id] + ) + + # 15. 
Verify frr.conf in VPC VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": False}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": False}, + {"config": "neighbor %s remote-as %s" % (bgppeer_2.ipaddress, bgppeer_2.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_2.ipaddress, PASSWORD_2), + "exists": True}, + {"config": "network %s" % test_vpc_tier_dynamic_1.cidr, + "exists": True}, + {"config": "network %s" % test_vpc_tier_dynamic_2.cidr, + "exists": True}] + self.verifyFrrConf(vpc_router, frr_configs) + + # 16. Update VPC BGP peers (to null) + test_vpc_dynamic.changeBgpPeers( + self.apiclient, + bgppeerids=[] + ) + + # 17. Verify frr.conf in VPC VR + frr_configs = [{"config": "neighbor %s remote-as %s" % (bgppeer_1.ipaddress, bgppeer_1.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_1.ipaddress, PASSWORD_1), + "exists": True}, + {"config": "neighbor %s remote-as %s" % (bgppeer_2.ipaddress, bgppeer_2.asnumber), + "exists": True}, + {"config": "neighbor %s password %s" % (bgppeer_2.ipaddress, PASSWORD_2), + "exists": True}, + {"config": "network %s" % test_vpc_tier_dynamic_1.cidr, + "exists": True}, + {"config": "network %s" % test_vpc_tier_dynamic_2.cidr, + "exists": True}] + self.verifyFrrConf(vpc_router, frr_configs) + + + @attr(tags=['advanced'], required_hardware=False) + def test_13_asn_ranges(self): + """ Test for ASN ranges""" + """ + # 1. Create an ASN range without overlap + # 2. List ASN ranges by zoneid + # 3. List ASN numbers by ASN range id + # 4. Create an ASN range with overlap, it should fail + # 5. Delete ASN range + """ + self.message("Running test_13_asn_ranges") + + # 1. Create an ASN range without overlap + asnrange_2 = ASNRange.create( + self.apiclient, + zoneid=self.zone.id, + startasn=END_ASN+100, + endasn=END_ASN+200 + ) + self.cleanup.append(asnrange_2) + + # 2. 
List ASN ranges by zoneid + ranges = ASNRange.list( + self.apiclient, + zoneid = self.zone.id + ) + self.assertEqual( + isinstance(ranges, list), + True, + "List ASN ranges by zoneid should return a valid list" + ) + self.assertEqual( + len(ranges) >= 1, + True, + "The number of ASN ranges (%s) should be at least 1" % (len(ranges)) + ) + asnrange_2_new = None + for range in ranges: + if range.startasn == asnrange_2.startasn: + asnrange_2_new = range + break + if asnrange_2_new: + self.assertEqual( + asnrange_2_new.endasn == asnrange_2.endasn, + True, + "The end ASN of ASN range (%s-%s) should be equal to %s" % (asnrange_2_new.startasn, asnrange_2_new.endasn, asnrange_2.endasn) + ) + else: + self.fail("Unable to find ASN range (%s-%s)" % (asnrange_2.startasn, asnrange_2.endasn)) + + # 3. List ASN numbers by ASN range id + asnumbers = ASNRange.listAsNumbers( + self.apiclient, + zoneid = self.zone.id, + asnrangeid = asnrange_2.id + ) + self.assertEqual( + isinstance(asnumbers, list), + True, + "List AS numbers should return a valid list" + ) + self.assertEqual( + len(asnumbers) == asnrange_2.endasn - asnrange_2.startasn + 1, + True, + "The number of asnumbers (%s) should be equal to %s" % (len(asnumbers), (asnrange_2.endasn - asnrange_2.startasn + 1)) + ) + + # 4. Create an ASN range with overlap, it should fail + try: + asnrange_3 = ASNRange.create( + self.apiclient, + zoneid=self.zone.id, + startasn=END_ASN+150, + endasn=END_ASN+250 + ) + self.cleanup.append(asnrange_3) + self.fail("Succeeded to create ASN range (%s-%s) but it should fail" % (asnrange_3.startasn, asnrange_3.endasn)) + except Exception as e: + self.message("Failed to create ASN range but it is expected") + + # 5. 
Delete ASN range + asnrange_2.delete( + self.apiclient + ) + self.cleanup.remove(asnrange_2) diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 488e5efd625..20f1cb3224a 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -766,7 +766,7 @@ class TestKubernetesCluster(cloudstackTestCase): self.verifyKubernetesCluster(cluster, cluster.name, None, size, control_nodes) self.debug("Existing Kubernetes cluster available with name %s" % cluster.name) return cluster - except AssertionError as error: + except AssertionError as error: self.debug("Existing cluster failed verification due to %s, need to deploy a new one" % error) self.deleteKubernetesClusterAndVerify(cluster.id, False, True) diff --git a/test/integration/smoke/test_network.py b/test/integration/smoke/test_network.py index 8f3f4f533dd..b3e7fd3e42f 100644 --- a/test/integration/smoke/test_network.py +++ b/test/integration/smoke/test_network.py @@ -17,6 +17,8 @@ # under the License. 
""" BVT tests for Network Life Cycle """ +import json + # Import Local Modules from marvin.codes import (FAILED, STATIC_NAT_RULE, LB_RULE, NAT_RULE, PASS) @@ -24,7 +26,7 @@ from marvin.cloudstackTestCase import cloudstackTestCase from marvin.cloudstackException import CloudstackAPIException from marvin.cloudstackAPI import rebootRouter from marvin.sshClient import SshClient -from marvin.lib.utils import cleanup_resources, get_process_status, get_host_credentials +from marvin.lib.utils import cleanup_resources, get_process_status, get_host_credentials, random_gen from marvin.lib.base import (Account, VirtualMachine, ServiceOffering, @@ -37,7 +39,9 @@ from marvin.lib.base import (Account, LoadBalancerRule, Router, NIC, - Cluster) + Template, + Cluster, + SSHKeyPair) from marvin.lib.common import (get_domain, get_free_vlan, get_zone, @@ -58,9 +62,11 @@ from marvin.lib.decoratorGenerators import skipTestIf from ddt import ddt, data import unittest # Import System modules +import os import time import logging import random +import tempfile _multiprocess_shared_ = True @@ -2113,3 +2119,313 @@ class TestSharedNetwork(cloudstackTestCase): 0, "Failed to find the placeholder IP" ) + + +class TestSharedNetworkWithConfigDrive(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.testClient = super(TestSharedNetworkWithConfigDrive, cls).getClsTestClient() + cls.apiclient = cls.testClient.getApiClient() + + cls.services = cls.testClient.getParsedTestDataConfig() + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls.hv = cls.testClient.getHypervisorInfo() + + if cls.hv.lower() == 'simulator': + cls.skip = True + return + else: + cls.skip = False + + cls._cleanup = [] + + template = Template.register( + cls.apiclient, + cls.services["test_templates_cloud_init"][cls.hv.lower()], + zoneid=cls.zone.id, + hypervisor=cls.hv, + ) + template.download(cls.apiclient) + 
cls._cleanup.append(template) + + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["virtual_machine"]["template"] = template.id + cls.services["virtual_machine"]["username"] = "ubuntu" + # Create Network Offering + cls.services["shared_network_offering_configdrive"]["specifyVlan"] = "True" + cls.services["shared_network_offering_configdrive"]["specifyIpRanges"] = "True" + cls.shared_network_offering = NetworkOffering.create(cls.apiclient, + cls.services["shared_network_offering_configdrive"], + conservemode=True) + + cls.isolated_network_offering = NetworkOffering.create( + cls.apiclient, + cls.services["isolated_network_offering"], + conservemode=True + ) + + # Update network offering state from disabled to enabled. + NetworkOffering.update( + cls.isolated_network_offering, + cls.apiclient, + id=cls.isolated_network_offering.id, + state="enabled" + ) + + # Update network offering state from disabled to enabled. + NetworkOffering.update(cls.shared_network_offering, cls.apiclient, state="enabled") + + cls.service_offering = ServiceOffering.create(cls.apiclient, cls.services["service_offering"]) + physical_network, vlan = get_free_vlan(cls.apiclient, cls.zone.id) + # create network using the shared network offering created + + cls.services["shared_network"]["acltype"] = "domain" + cls.services["shared_network"]["vlan"] = vlan + cls.services["shared_network"]["networkofferingid"] = cls.shared_network_offering.id + cls.services["shared_network"]["physicalnetworkid"] = physical_network.id + + cls.setSharedNetworkParams("shared_network") + cls.shared_network = Network.create(cls.apiclient, + cls.services["shared_network"], + networkofferingid=cls.shared_network_offering.id, + zoneid=cls.zone.id) + + cls.isolated_network = Network.create( + cls.apiclient, + cls.services["isolated_network"], + networkofferingid=cls.isolated_network_offering.id, + zoneid=cls.zone.id + ) + + cls._cleanup.extend([ + cls.service_offering, + cls.shared_network, + 
cls.shared_network_offering, + cls.isolated_network, + cls.isolated_network_offering, + ]) + cls.tmp_files = [] + cls.keypair = cls.generate_ssh_keys() + return + + @classmethod + def generate_ssh_keys(cls): + """Generates ssh key pair + + Writes the private key into a temp file and returns the file name + + :returns: generated keypair + :rtype: MySSHKeyPair + """ + cls.keypair = SSHKeyPair.create( + cls.apiclient, + name=random_gen() + ".pem") + + cls._cleanup.append(SSHKeyPair(cls.keypair.__dict__, None)) + cls.debug("Created keypair with name: %s" % cls.keypair.name) + cls.debug("Writing the private key to local file") + pkfile = tempfile.gettempdir() + os.sep + cls.keypair.name + cls.keypair.private_key_file = pkfile + cls.tmp_files.append(pkfile) + cls.debug("File path: %s" % pkfile) + with open(pkfile, "w+") as f: + f.write(cls.keypair.privatekey) + os.chmod(pkfile, 0o400) + + return cls.keypair + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + if self.skip: + self.skipTest("Hypervisor is simulator - skipping Test..") + self.cleanup = [] + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.apiclient, cls._cleanup) + for tmp_file in cls.tmp_files: + os.remove(tmp_file) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def tearDown(self): + cleanup_resources(self.apiclient, self.cleanup) + return + + @classmethod + def setSharedNetworkParams(cls, network, range=20): + + # @range: range decides the endip. Pass the range as "x" if you want the difference between the startip + # and endip as "x" + # Set the subnet number of shared networks randomly prior to execution + # of each test case to avoid overlapping of ip addresses + shared_network_subnet_number = random.randrange(1, 254) + cls.services[network]["gateway"] = "172.16." 
+ str(shared_network_subnet_number) + ".1" + cls.services[network]["startip"] = "172.16." + str(shared_network_subnet_number) + ".2" + cls.services[network]["endip"] = "172.16." + str(shared_network_subnet_number) + "." + str(range + 1) + cls.services[network]["netmask"] = "255.255.255.0" + logger.debug("Executing command '%s'" % cls.services[network]) + + def _mount_config_drive(self, ssh): + """ + This method is to verify whether configdrive iso + is attached to vm or not + Returns mount path if config drive is attached else None + """ + mountdir = "/root/iso" + cmd = "sudo blkid -t LABEL='config-2' " \ + "/dev/sr? /dev/hd? /dev/sd? /dev/xvd? -o device" + tmp_cmd = [ + 'sudo bash -c "if [ ! -d {0} ]; then mkdir {0}; fi"'.format(mountdir), + "sudo umount %s" % mountdir] + self.debug("Unmounting drive from %s" % mountdir) + for tcmd in tmp_cmd: + ssh.execute(tcmd) + + self.debug("Trying to find ConfigDrive device") + configDrive = ssh.execute(cmd) + if not configDrive: + self.warn("ConfigDrive is not attached") + return None + + res = ssh.execute("sudo mount {} {}".format(str(configDrive[0]), mountdir)) + if str(res).lower().find("read-only") > -1: + self.debug("ConfigDrive iso is mounted at location %s" % mountdir) + return mountdir + else: + return None + + def _umount_config_drive(self, ssh, mount_path): + """unmount config drive inside guest vm + + :param ssh: SSH connection to the VM + :type ssh: marvin.sshClient.SshClient + :type mount_path: str + """ + ssh.execute("sudo umount -d %s" % mount_path) + # Give the VM time to unlock the iso device + time.sleep(0.5) + # Verify umount + result = ssh.execute("sudo ls %s" % mount_path) + self.assertTrue(len(result) == 0, + "After umount directory should be empty " + "but contains: %s" % result) + + def _get_config_drive_data(self, ssh, file, name, fail_on_missing=True): + """Fetches the content of a file file on the config drive + + :param ssh: SSH connection to the VM + :param file: path to the file to fetch + 
:param name: description of the file + :param fail_on_missing: + whether the test should fail if the file is missing + :type ssh: marvin.sshClient.SshClient + :type file: str + :type name: str + :type fail_on_missing: bool + :returns: the content of the file + :rtype: str + """ + cmd = "sudo cat %s" % file + res = ssh.execute(cmd) + content = '\n'.join(res) + + if fail_on_missing and "No such file or directory" in content: + self.debug("{} is not found".format(name)) + self.fail("{} is not found".format(name)) + + return content + + def _get_ip_address_output(self, ssh): + cmd = "ip address" + res = ssh.execute(cmd) + return '\n'.join(res) + + @attr(tags=["advanced", "shared"], required_hardware="true") + def test_01_deployVMInSharedNetwork(self): + try: + self.virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"], + networkids=[self.shared_network.id, self.isolated_network.id], + serviceofferingid=self.service_offering.id, + keypair=self.keypair.name + ) + self.cleanup.append(self.virtual_machine) + except Exception as e: + self.fail("Exception while deploying virtual machine: %s" % e) + + public_ips = list_publicIP( + self.apiclient, + associatednetworkid=self.isolated_network.id + ) + public_ip = public_ips[0] + FireWallRule.create( + self.apiclient, + ipaddressid=public_ip.id, + protocol=self.services["natrule"]["protocol"], + cidrlist=['0.0.0.0/0'], + startport=self.services["natrule"]["publicport"], + endport=self.services["natrule"]["publicport"] + ) + + nat_rule = NATRule.create( + self.apiclient, + self.virtual_machine, + self.services["natrule"], + public_ip.id + ) + + private_key_file_location = self.keypair.private_key_file if self.keypair else None + ssh = self.virtual_machine.get_ssh_client(ipaddress=nat_rule.ipaddress, + keyPairFileLocation=private_key_file_location, retries=5) + + mount_path = self._mount_config_drive(ssh) + + network_data_content = self._get_config_drive_data(ssh, mount_path + 
"/openstack/latest/network_data.json", + "network_data") + + network_data = json.loads(network_data_content) + + self._umount_config_drive(ssh, mount_path) + + ip_address_output = self._get_ip_address_output(ssh) + + self.assertTrue('links' in network_data, "network_data.json doesn't contain links") + self.assertTrue('networks' in network_data, "network_data.json doesn't contain networks") + self.assertTrue('services' in network_data, "network_data.json doesn't contain services") + + for x in ['links', 'networks', 'services']: + self.assertTrue(x in network_data, "network_data.json doesn't contain " + x) + self.assertEqual(len(network_data[x]), 2, "network_data.json doesn't contain 2 " + x) + + self.assertIn(network_data['links'][0]['ethernet_mac_address'], + [self.virtual_machine.nic[0].macaddress, self.virtual_machine.nic[1].macaddress], + "macaddress doesn't match") + self.assertIn(network_data['links'][1]['ethernet_mac_address'], + [self.virtual_machine.nic[0].macaddress, self.virtual_machine.nic[1].macaddress], + "macaddress doesn't match") + + self.assertIn(network_data['networks'][0]['ip_address'], + [self.virtual_machine.nic[0].ipaddress, self.virtual_machine.nic[1].ipaddress], + "ip address doesn't match") + self.assertIn(network_data['networks'][1]['ip_address'], + [self.virtual_machine.nic[0].ipaddress, self.virtual_machine.nic[1].ipaddress], + "ip address doesn't match") + self.assertIn(network_data['networks'][0]['netmask'], + [self.virtual_machine.nic[0].netmask, self.virtual_machine.nic[1].netmask], + "netmask doesn't match") + self.assertIn(network_data['networks'][1]['netmask'], + [self.virtual_machine.nic[0].netmask, self.virtual_machine.nic[1].netmask], + "netmask doesn't match") + + self.assertEqual(network_data['services'][0]['type'], 'dns', "network_data.json doesn't contain dns service") + self.assertEqual(network_data['services'][1]['type'], 'dns', "network_data.json doesn't contain dns service") + + 
self.assertTrue(self.virtual_machine.nic[0].ipaddress in ip_address_output, "ip address doesn't match") + self.assertTrue(self.virtual_machine.nic[1].ipaddress in ip_address_output, "ip address doesn't match") diff --git a/test/integration/smoke/test_network_ipv6.py b/test/integration/smoke/test_network_ipv6.py index 2c369f28300..1e5cec7ef4c 100644 --- a/test/integration/smoke/test_network_ipv6.py +++ b/test/integration/smoke/test_network_ipv6.py @@ -394,7 +394,7 @@ class TestIpv6Network(cloudstackTestCase): cmd, hypervisor=self.routerDetailsMap[router.id]['hypervisor'] ) - self.assertTrue(type(result) == list and len(result) > 0, + self.assertTrue(type(result) == list, "%s on router %s returned invalid result" % (cmd, router.id)) result = '\n'.join(result) return result diff --git a/test/integration/smoke/test_primary_storage_scope.py b/test/integration/smoke/test_primary_storage_scope.py new file mode 100644 index 00000000000..db2cd09b616 --- /dev/null +++ b/test/integration/smoke/test_primary_storage_scope.py @@ -0,0 +1,178 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" BVT tests for Primary Storage +""" + +# Import System modules +# Import Local Modules +from marvin.cloudstackTestCase import * +from marvin.lib.base import (Host, StoragePool, Cluster, updateStoragePool, changeStoragePoolScope) +from marvin.lib.common import (get_zone, get_pod, list_clusters) +from marvin.lib.utils import cleanup_resources +from nose.plugins.attrib import attr + +class TestPrimaryStorageScope(cloudstackTestCase): + + def setUp(self): + + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.services = self.testClient.getParsedTestDataConfig() + self.cleanup = [] + self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) + self.pod = get_pod(self.apiclient, self.zone.id) + self.debug(self.services) + self.cluster1 = list_clusters(self.apiclient)[0] + self.debug(self.cluster1) + if (self.cluster1 == None): + cloudstackTestCase.skipTest(self, "Cluster not found. Skipping test.") + if (self.cluster1.hypervisortype not in ['KVM', 'VMware', 'Simulator']): + cloudstackTestCase.skipTest(self, "Supported hypervisors (KVM, VMware, Simulator) not found. 
Skipping test.") + self.cluster = { + 'clustername': 'C0_testScope', + 'clustertype': 'CloudManaged' + } + return + + def tearDown(self): + super(TestPrimaryStorageScope, self).tearDown() + + @attr(tags=["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="true") + def test_01_primary_storage_scope_change(self): + """Test primary storage pool scope change + """ + + # Create cluster + self.cluster2 = Cluster.create(self.apiclient, + self.cluster, + zoneid=self.zone.id, + podid=self.pod.id, + hypervisor=self.cluster1.hypervisortype + ) + self.cleanup.append(self.cluster2) + + # Create zone-wide storage pool + self.storage = StoragePool.create(self.apiclient, + self.services["nfs"], + scope = 'ZONE', + zoneid=self.zone.id, + hypervisor=self.cluster1.hypervisortype + ) + self.cleanup.append(self.storage) + self.debug("Created storage pool %s in zone scope", self.storage.id) + + # Disable storage pool + cmd = updateStoragePool.updateStoragePoolCmd() + cmd.id = self.storage.id + cmd.enabled = False + self.apiclient.updateStoragePool(cmd) + + self.debug("Disabled storage pool : %s" % self.storage.id) + + # Change storage pool scope to Cluster2 + cmd = changeStoragePoolScope.changeStoragePoolScopeCmd() + cmd.id = self.storage.id + cmd.scope = "CLUSTER" + cmd.clusterid = self.cluster2.id + self.apiclient.changeStoragePoolScope(cmd) + + self.debug("Changed scope of storage pool %s to cluster" % self.storage.id) + + pool_id = self.dbclient.execute("select id from storage_pool where uuid=\"" + self.storage.id + "\"")[0][0] + host1 = Host.list(self.apiclient, clusterid=self.cluster1.id, listall=True)[0] + host1_id = self.dbclient.execute("select id from host where uuid=\"" + host1.id + "\"")[0][0] + + pool_row = self.dbclient.execute("select cluster_id, pod_id, scope from storage_pool where id=" + str(pool_id))[0] + capacity_row = self.dbclient.execute("select cluster_id, pod_id from op_host_capacity where capacity_type=3 and host_id=" + str(pool_id))[0] + 
pool_host_rows = self.dbclient.execute("select id from storage_pool_host_ref where host_id=" + str(host1_id) + " and pool_id=" + str(pool_id)) + + self.assertIsNotNone( + pool_row[0], + "Cluster id should not be NULL for cluster scope" + ) + self.assertIsNotNone( + pool_row[1], + "Pod id should not be NULL for cluster scope" + ) + self.assertEqual( + pool_row[2], + "CLUSTER", + "Storage pool scope not changed to Cluster" + ) + self.assertIsNotNone( + capacity_row[0], + "Cluster id should not be NULL in the op_host_capacity table" + ) + self.assertIsNotNone( + capacity_row[1], + "Pod id set should not be NULL in the op_host_capacity table" + ) + self.assertEqual( + len(pool_host_rows), + 0, + "Storage pool not removed from the storage_pool_host_ref table for host on another cluster" + ) + + # Change storage pool scope to Zone + cmd = changeStoragePoolScope.changeStoragePoolScopeCmd() + cmd.id = self.storage.id + cmd.scope = "ZONE" + self.apiclient.changeStoragePoolScope(cmd) + + self.debug("Changed scope of storage pool %s to zone" % self.storage.id) + + pool_row = self.dbclient.execute("select cluster_id, pod_id, scope from storage_pool where id=" + str(pool_id))[0] + capacity_row = self.dbclient.execute("select cluster_id, pod_id from op_host_capacity where capacity_type=3 and host_id=" + str(pool_id))[0] + pool_host_rows = self.dbclient.execute("select id from storage_pool_host_ref where host_id=" + str(host1_id) + " and pool_id=" + str(pool_id)) + + self.assertIsNone( + pool_row[0], + "Cluster id not set to NULL for zone scope" + ) + self.assertIsNone( + pool_row[1], + "Pod id not set to NULL for zone scope" + ) + self.assertEqual( + pool_row[2], + "ZONE", + "Storage pool scope not changed to ZONE" + ) + self.assertIsNone( + capacity_row[0], + "Cluster id not set to NULL in the op_host_capacity table" + ) + self.assertIsNone( + capacity_row[1], + "Pod id not set to NULL in the op_host_capacity table" + ) + self.assertEqual( + len(pool_host_rows), + 1, + "Storage 
pool not added to the storage_pool_host_ref table for host on another cluster" + ) + + # Enable storage pool + cmd = updateStoragePool.updateStoragePoolCmd() + cmd.id = self.storage.id + cmd.enabled = True + response = self.apiclient.updateStoragePool(cmd) + self.assertEqual( + response.state, + "Up", + "Storage pool couldn't be enabled" + ) diff --git a/test/integration/smoke/test_privategw_acl.py b/test/integration/smoke/test_privategw_acl.py index 06a6241d199..b0553241dc2 100644 --- a/test/integration/smoke/test_privategw_acl.py +++ b/test/integration/smoke/test_privategw_acl.py @@ -920,6 +920,6 @@ class TestPrivateGwACL(cloudstackTestCase): (select id from physical_network where uuid='%s');" % physical_network.id ) for traffic_type in traffic_type_list: - if "Guest" in str(traffic_type[0]): + if "Guest" in str(traffic_type[0]): return physical_network return None diff --git a/test/integration/smoke/test_privategw_acl_ovs_gre.py b/test/integration/smoke/test_privategw_acl_ovs_gre.py index 94e5c3c839a..89e20af914e 100644 --- a/test/integration/smoke/test_privategw_acl_ovs_gre.py +++ b/test/integration/smoke/test_privategw_acl_ovs_gre.py @@ -683,7 +683,7 @@ class TestPrivateGwACLOvsGRE(cloudstackTestCase): (select id from physical_network where uuid='%s');" % physical_network.id ) for traffic_type in traffic_type_list: - if "Guest" in str(traffic_type[0]): + if "Guest" in str(traffic_type[0]): return physical_network return None diff --git a/test/integration/smoke/test_purge_expunged_vms.py b/test/integration/smoke/test_purge_expunged_vms.py new file mode 100644 index 00000000000..0fe55991059 --- /dev/null +++ b/test/integration/smoke/test_purge_expunged_vms.py @@ -0,0 +1,371 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
# The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for purging expunged VMs and their resources
"""
# Import Local Modules
from marvin.codes import FAILED
from marvin.cloudstackAPI import (purgeExpungedResources,
                                  listInfrastructure,
                                  listManagementServers)
from marvin.cloudstackException import CloudstackAPIException
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (Account,
                             Domain,
                             ServiceOffering,
                             DiskOffering,
                             NetworkOffering,
                             Network,
                             VirtualMachine,
                             Configurations)
from marvin.lib.common import (get_domain,
                               get_zone,
                               get_template)
from marvin.lib.utils import (random_gen)
from marvin.lib.decoratorGenerators import skipTestIf
from marvin.sshClient import SshClient
from nose.plugins.attrib import attr
import logging
# Import System modules
import time
from datetime import datetime, timedelta
import pytz
import threading


_multiprocess_shared_ = True
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"


class TestPurgeExpungedVms(cloudstackTestCase):
    """Tests the purgeExpungedResources API and the background purge task."""

    @classmethod
    def setUpClass(cls):
        """One-time setup: offerings, L2 network and a dedicated account/domain."""
        cls.testClient = super(TestPurgeExpungedVms, cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        cls.services = cls.testClient.getParsedTestDataConfig()
        cls.dbConnection = cls.testClient.getDbConnection()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype

        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__

        cls.hypervisor = cls.testClient.getHypervisorInfo().lower()
        # Simulator cannot run the background-task test (see test_06)
        cls.hypervisorIsSimulator = cls.hypervisor == 'simulator'

        cls._cleanup = []
        cls.logger = logging.getLogger('TestPurgeExpungedVms')
        cls.logger.setLevel(logging.DEBUG)

        template = get_template(
            cls.apiclient,
            cls.zone.id,
            cls.services["ostype"])
        if template == FAILED:
            assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

        # Set Zones and disk offerings
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["small"]["template"] = template.id

        cls.compute_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"])
        cls._cleanup.append(cls.compute_offering)

        # Offering that asks CloudStack to purge resources right after
        # expunge; exercised by test_05
        cls.purge_resource_compute_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"],
            purgeresources=True)
        cls._cleanup.append(cls.purge_resource_compute_offering)

        cls.disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.services["disk_offering"]
        )
        cls._cleanup.append(cls.disk_offering)

        cls.network_offering = NetworkOffering.create(
            cls.apiclient,
            cls.services["l2-network_offering"],
        )
        cls._cleanup.append(cls.network_offering)
        cls.network_offering.update(cls.apiclient, state='Enabled')
        cls.services["network"]["networkoffering"] = cls.network_offering.id

        cls.domain1 = Domain.create(
            cls.apiclient,
            cls.services["domain"])
        cls._cleanup.append(cls.domain1)
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain1.id)
        cls._cleanup.append(cls.account)
        cls.userapiclient = cls.testClient.getUserApiClient(
            UserName=cls.account.name,
            DomainName=cls.account.domain
        )
        cls.l2_network = Network.create(
            cls.userapiclient,
            cls.services["l2-network"],
            zoneid=cls.zone.id,
            networkofferingid=cls.network_offering.id
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = template.id

    @classmethod
    def tearDownClass(cls):
        super(TestPurgeExpungedVms, cls).tearDownClass()

    def updateVmCreatedRemovedInDb(self, vm_id, timestamp):
        """Backdate a VM's created/removed columns so purge windows apply.

        NOTE(review): assumes the DB stores timestamps in UTC — confirm.
        """
        utc_timestamp = datetime.strptime(timestamp, DATETIME_FORMAT).astimezone(pytz.utc).strftime(DATETIME_FORMAT)
        logging.info("Updating VM: %s created and removed in DB with timestamp: %s" % (vm_id, timestamp))
        query = "UPDATE cloud.vm_instance SET created='%s', removed='%s' WHERE uuid='%s'" % (utc_timestamp, utc_timestamp, vm_id)
        self.dbConnection.execute(query)

    def setupExpungedVm(self, timestamp):
        """Create, expunge and backdate one VM; record its uuid by timestamp."""
        logging.info("Setting up expunged VM with timestamp: %s" % timestamp)
        vm = VirtualMachine.create(
            self.userapiclient,
            self.services["virtual_machine"],
            serviceofferingid=self.compute_offering.id,
            networkids=self.l2_network.id
        )
        self.cleanup.append(vm)
        vm_id = vm.id
        self.vm_ids[timestamp] = vm_id
        vm.delete(self.apiclient, expunge=True)
        self.cleanup.remove(vm)
        self.updateVmCreatedRemovedInDb(vm_id, timestamp)

    def setupExpungedVms(self):
        """Create one expunged VM per prepared timestamp, in parallel threads."""
        logging.info("Setup VMs")
        self.vm_ids = {}
        self.threads = []
        days = 3
        for i in range(days):
            logging.info("Setting up expunged VMs for day: %d" % (i + 1))
            thread = threading.Thread(target=self.setupExpungedVm, args=(self.timestamps[i],))
            self.threads.append(thread)
            thread.start()

        for index, thread in enumerate(self.threads):
            logging.info("Before joining thread %d." % index)
            thread.join()
            logging.info("Thread %d done" % index)

    def setUp(self):
        """Prepare backdated expunged VMs unless the test provides its own."""
        self.cleanup = []
        self.changedConfigurations = {}
        self.staticConfigurations = []
        if 'service_offering' in self._testMethodName:
            return
        if 'background_task' in self._testMethodName and self.hypervisorIsSimulator:
            return
        self.days = 3
        self.timestamps = []
        for i in range(self.days):
            # Timestamps 6, 4 and 2 days in the past, oldest first
            days_ago = (self.days - i) * 2
            now = (datetime.now() - timedelta(days=days_ago))
            timestamp = now.strftime(DATETIME_FORMAT)
            self.timestamps.append(timestamp)
        self.setupExpungedVms()

    def tearDown(self):
        """Revert changed configurations, restarting MS for static ones."""
        restartServer = False
        for config, value in self.changedConfigurations.items():
            logging.info("Reverting value of config: %s to %s" % (config, value))
            Configurations.update(self.apiclient,
                                  config,
                                  value=value)
            if config in self.staticConfigurations:
                restartServer = True
        if restartServer:
            self.restartAllManagementServers()
        super(TestPurgeExpungedVms, self).tearDown()

    def executePurgeExpungedResources(self, start_date, end_date):
        """Call the purgeExpungedResources API with optional date bounds."""
        cmd = purgeExpungedResources.purgeExpungedResourcesCmd()
        if start_date is not None:
            cmd.startdate = start_date
        if end_date is not None:
            cmd.enddate = end_date
        self.apiclient.purgeExpungedResources(cmd)

    def getVmsInDb(self, vm_ids):
        """Return vm_instance rows for the given uuids (may be None/empty)."""
        vm_id_str = "','".join(vm_ids)
        vm_id_str = "'" + vm_id_str + "'"
        query = "SELECT * FROM cloud.vm_instance WHERE uuid IN (%s)" % vm_id_str
        response = self.dbConnection.execute(query)
        logging.info("DB response from VM: %s:: %s" % (vm_id_str, response))
        return response

    def validatePurgedVmEntriesInDb(self, purged, not_purged):
        """Check purged uuids are gone from vm_instance and not_purged remain.

        Bug fix: the second assertion used `or`, which passed whenever any
        rows were returned (the length was never checked) and raised
        TypeError (len(None)) when none were; it now requires a non-None
        response of exactly the expected length.
        """
        if purged is not None:
            response = self.getVmsInDb(purged)
            self.assertTrue(response is None or len(response) == 0,
                            "Purged VMs still present in DB")
        if not_purged is not None:
            response = self.getVmsInDb(not_purged)
            self.assertTrue(response is not None and len(response) == len(not_purged),
                            "Not purged VM not present in DB")

    def changeConfiguration(self, name, value):
        """Set a global config value, remembering the old one for tearDown."""
        current_config = Configurations.list(self.apiclient, name=name)[0]
        if current_config.value == value:
            return
        logging.info("Current value for config: %s is %s, changing it to %s" % (name, current_config.value, value))
        self.changedConfigurations[name] = current_config.value
        if current_config.isdynamic == False:
            # Static configs need a management-server restart to take effect
            self.staticConfigurations.append(name)
        Configurations.update(self.apiclient,
                              name,
                              value=value)

    def isManagementUp(self):
        """Return True when the management server answers API calls."""
        try:
            self.apiclient.listInfrastructure(listInfrastructure.listInfrastructureCmd())
            return True
        except Exception:
            return False

    def getManagementServerIps(self):
        """Return IPs of all Up management servers, or None for localhost."""
        if self.mgtSvrDetails["mgtSvrIp"] == 'localhost':
            return None
        cmd = listManagementServers.listManagementServersCmd()
        servers = self.apiclient.listManagementServers(cmd)
        active_server_ips = [self.mgtSvrDetails["mgtSvrIp"]]
        for server in servers:
            if server.state == 'Up' and server.serviceip != self.mgtSvrDetails["mgtSvrIp"]:
                active_server_ips.append(server.serviceip)
        return active_server_ips

    def restartAllManagementServers(self):
        """Restart all management servers
        Assumes all servers have same username and password"""
        server_ips = self.getManagementServerIps()
        if server_ips is None:
            self.staticConfigurations.clear()
            self.fail("MS restarts cannot be done on %s" % self.mgtSvrDetails["mgtSvrIp"])
            return False
        self.debug("Restarting all management server")
        for idx, server_ip in enumerate(server_ips):
            self.debug(f"Restarting management server #{idx} with IP {server_ip}")
            sshClient = SshClient(
                server_ip,
                22,
                self.mgtSvrDetails["user"],
                self.mgtSvrDetails["passwd"]
            )
            command = "service cloudstack-management stop"
            sshClient.execute(command)
            command = "service cloudstack-management start"
            sshClient.execute(command)
            if idx == 0:
                # Wait before restarting other management servers to make
                # the first one the oldest running
                time.sleep(10)

        # Wait up to 10 minutes for management to come back up
        timeout = time.time() + (10 * 60)
        while time.time() < timeout:
            if self.isManagementUp():
                return True
            time.sleep(5)
        self.debug("Management server did not come up, failing")
        return False

    @attr(tags=["advanced"], required_hardware="true")
    def test_01_purge_expunged_api_vm_start_date(self):
        """Purge with only a start date: older VM must survive."""
        self.executePurgeExpungedResources(self.timestamps[1], None)
        self.validatePurgedVmEntriesInDb(
            [self.vm_ids[self.timestamps[1]], self.vm_ids[self.timestamps[2]]],
            [self.vm_ids[self.timestamps[0]]]
        )

    @attr(tags=["advanced"], required_hardware="true")
    def test_02_purge_expunged_api_vm_end_date(self):
        """Purge with only an end date: newest VM must survive."""
        self.executePurgeExpungedResources(None, self.timestamps[1])
        self.validatePurgedVmEntriesInDb(
            [self.vm_ids[self.timestamps[0]], self.vm_ids[self.timestamps[1]]],
            [self.vm_ids[self.timestamps[2]]]
        )

    @attr(tags=["advanced"], required_hardware="true")
    def test_03_purge_expunged_api_vm_start_end_date(self):
        """Purge with both dates spanning all VMs: all must be purged."""
        self.executePurgeExpungedResources(self.timestamps[0], self.timestamps[2])
        self.validatePurgedVmEntriesInDb(
            [self.vm_ids[self.timestamps[0]], self.vm_ids[self.timestamps[1]], self.vm_ids[self.timestamps[2]]],
            None
        )

    @attr(tags=["advanced"], required_hardware="true")
    def test_04_purge_expunged_api_vm_no_date(self):
        """Purge with no dates: all expunged VMs must be purged."""
        self.executePurgeExpungedResources(None, None)
        self.validatePurgedVmEntriesInDb(
            [self.vm_ids[self.timestamps[0]], self.vm_ids[self.timestamps[1]], self.vm_ids[self.timestamps[2]]],
            None
        )

    @attr(tags=["advanced", "skip_setup_vms"], required_hardware="true")
    def test_05_purge_expunged_vm_service_offering(self):
        """A VM on a purgeresources offering is purged after the job delay."""
        purge_delay = 181
        self.changeConfiguration('expunged.resource.purge.job.delay', purge_delay)
        vm = VirtualMachine.create(
            self.userapiclient,
            self.services["virtual_machine"],
            serviceofferingid=self.purge_resource_compute_offering.id,
            networkids=self.l2_network.id
        )
        self.cleanup.append(vm)
        vm_id = vm.id
        vm.delete(self.apiclient, expunge=True)
        self.cleanup.remove(vm)
        wait = 1.25 * purge_delay
        logging.info("Waiting for 1.25x%d = %d seconds for VM to get purged" % (purge_delay, wait))
        time.sleep(wait)
        self.validatePurgedVmEntriesInDb(
            [vm_id],
            None
        )

    @skipTestIf("hypervisorIsSimulator")
    @attr(tags=["advanced"], required_hardware="true")
    def test_06_purge_expunged_vm_background_task(self):
        """The periodic purge task removes all backdated expunged VMs."""
        purge_task_delay = 120
        self.changeConfiguration('expunged.resources.purge.enabled', 'true')
        self.changeConfiguration('expunged.resources.purge.delay', purge_task_delay)
        self.changeConfiguration('expunged.resources.purge.interval', int(purge_task_delay / 2))
        self.changeConfiguration('expunged.resources.purge.keep.past.days', 1)
        if len(self.staticConfigurations) > 0:
            self.restartAllManagementServers()
        wait_multiple = 2
        wait = wait_multiple * purge_task_delay
        logging.info(f"Waiting for {wait_multiple}x{purge_task_delay} = {wait} seconds for background task to execute")
        time.sleep(wait)
        logging.debug("Validating expunged VMs")
        self.validatePurgedVmEntriesInDb(
            [self.vm_ids[self.timestamps[0]], self.vm_ids[self.timestamps[1]], self.vm_ids[self.timestamps[2]]],
            None
        )
and Default Built-in template self.domain = get_domain(self.apiclient) self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) + self.hypervisor = self.testClient.getHypervisorInfo() #create a user account self.account = Account.create( @@ -96,7 +97,7 @@ class TestRegisteredUserdata(cloudstackTestCase): ) self.testdata["mode"] = self.zone.networktype - self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"]) + self.template = get_test_template(self.apiclient, self.zone.id, self.hypervisor) #create a service offering small_service_offering = self.testdata["service_offerings"]["small"] @@ -115,25 +116,21 @@ class TestRegisteredUserdata(cloudstackTestCase): self.testdata["network"], networkofferingid=self.no_isolate.id, zoneid=self.zone.id, - accountid="admin", - domainid=1 + accountid=self.account.name, + domainid=self.account.domainid ) #build cleanup list self.cleanup = [ + self.account, + self.no_isolate, self.service_offering, self.isolated_network, - self.no_isolate, - self.account, ] def tearDown(self): - try: - cleanup_resources(self.apiclient, self.cleanup) - except Exception as e: - self.debug("Warning! 
Exception in tearDown: %s" % e) - + super(TestRegisteredUserdata, self).tearDown() @attr(tags=['advanced', 'simulator', 'basic', 'sg'], required_hardware=False) def test_CRUD_operations_userdata(self): @@ -192,22 +189,21 @@ class TestRegisteredUserdata(cloudstackTestCase): self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id, - accountid="admin", - domainid=1, + accountid=self.account.name, + domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=[self.isolated_network.id], userdataid=self.userdata2.userdata.id ) self.cleanup.append(self.virtual_machine) - self.cleanup.append(self.userdata2) networkid = self.virtual_machine.nic[0].networkid src_nat_list = PublicIPAddress.list( self.apiclient, associatednetworkid=networkid, - account="admin", - domainid=1, + account=self.account.name, + domainid=self.account.domainid, listall=True, issourcenat=True, ) @@ -320,8 +316,8 @@ class TestRegisteredUserdata(cloudstackTestCase): self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id, - accountid="admin", - domainid=1, + accountid=self.account.name, + domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=[self.isolated_network.id], @@ -329,14 +325,13 @@ class TestRegisteredUserdata(cloudstackTestCase): userdatadetails=[{"key1": "value1"}] ) self.cleanup.append(self.virtual_machine) - self.cleanup.append(self.userdata2) networkid = self.virtual_machine.nic[0].networkid src_nat_list = PublicIPAddress.list( self.apiclient, associatednetworkid=networkid, - account="admin", - domainid=1, + account=self.account.name, + domainid=self.account.domainid, listall=True, issourcenat=True, ) @@ -493,15 +488,14 @@ class TestRegisteredUserdata(cloudstackTestCase): self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id, - accountid="admin", - domainid=1, + accountid=self.account.name, + domainid=self.account.domainid, 
serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=[self.isolated_network.id], userdataid=self.apiUserdata.userdata.id ) self.cleanup.append(self.virtual_machine) - self.cleanup.append(self.apiUserdata) self.template = Template.linkUserDataToTemplate( self.apiclient, @@ -512,8 +506,8 @@ class TestRegisteredUserdata(cloudstackTestCase): src_nat_list = PublicIPAddress.list( self.apiclient, associatednetworkid=networkid, - account="admin", - domainid=1, + account=self.account.name, + domainid=self.account.domainid, listall=True, issourcenat=True, ) @@ -623,16 +617,14 @@ class TestRegisteredUserdata(cloudstackTestCase): self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id, - accountid="admin", - domainid=1, + accountid=self.account.name, + domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=[self.isolated_network.id], userdataid=self.apiUserdata.userdata.id ) self.cleanup.append(self.virtual_machine) - self.cleanup.append(self.apiUserdata) - self.cleanup.append(self.templateUserdata) self.template = Template.linkUserDataToTemplate( self.apiclient, @@ -643,8 +635,8 @@ class TestRegisteredUserdata(cloudstackTestCase): src_nat_list = PublicIPAddress.list( self.apiclient, associatednetworkid=networkid, - account="admin", - domainid=1, + account=self.account.name, + domainid=self.account.domainid, listall=True, issourcenat=True, ) @@ -770,8 +762,8 @@ class TestRegisteredUserdata(cloudstackTestCase): self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id, - accountid="admin", - domainid=1, + accountid=self.account.name, + domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id, networkids=[self.isolated_network.id], @@ -781,9 +773,6 @@ class TestRegisteredUserdata(cloudstackTestCase): self.debug("Deploy VM with userdata passed during deployment failed as expected because template userdata override 
policy is deny. Exception here is : %s" % e.exception) - self.cleanup.append(self.apiUserdata) - self.cleanup.append(self.templateUserdata) - self.template = Template.linkUserDataToTemplate( self.apiclient, templateid=self.template.id @@ -809,7 +798,6 @@ class TestRegisteredUserdata(cloudstackTestCase): account=self.account.name, domainid=self.account.domainid ) - self.cleanup.append(self.userdata) list_userdata = UserData.list(self.apiclient, id=self.userdata.userdata.id, listall=True) self.assertNotEqual( @@ -853,4 +841,3 @@ class TestRegisteredUserdata(cloudstackTestCase): self.userapiclient, id=self.userdata.userdata.id ) - self.cleanup.remove(self.userdata) diff --git a/test/integration/smoke/test_resource_names.py b/test/integration/smoke/test_resource_names.py new file mode 100644 index 00000000000..46fa445f1b1 --- /dev/null +++ b/test/integration/smoke/test_resource_names.py @@ -0,0 +1,299 @@ +# -- coding: utf-8 -- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" BVT tests for resource names with emojis / unicode +""" +from marvin.cloudstackTestCase import cloudstackTestCase + +from marvin.lib.base import (Account, + ServiceOffering, + VirtualMachine, + Template, + Iso, + Volume, + DiskOffering) +from marvin.lib.common import (get_domain, + get_zone, + get_suitable_test_template, + get_builtin_template_info) +from marvin.codes import FAILED +from nose.plugins.attrib import attr +# Import System modules +import time + +_multiprocess_shared_ = True + +class TestResourceNames(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestResourceNames, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + cls.hypervisor = testClient.getHypervisorInfo() + cls.services['mode'] = cls.zone.networktype + cls._cleanup = [] + + template = get_suitable_test_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"], + cls.hypervisor + ) + if template == FAILED: + assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services[ + "ostype"] + + # Set Zones and disk offerings + cls.services["domainid"] = cls.domain.id + cls.services["zoneid"] = cls.zone.id + cls.services["template"] = template.id + cls.services["iso1"]["zoneid"] = cls.zone.id + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + cls.services["account"]["firstname"] = "test🎉" + cls.services["account"]["lastname"] = "account🙂" + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + + cls.services["service_offerings"]["tiny"]["name"] = "test🎉svcoffering🙂" + cls.service_offering = ServiceOffering.create( + cls.apiclient, + 
cls.services["service_offerings"]["tiny"] + ) + cls._cleanup.append(cls.service_offering) + + cls.services["disk_offering"]["name"] = "test🎉diskoffering🙂" + cls.disk_offering = DiskOffering.create( + cls.apiclient, + cls.services["disk_offering"] + ) + cls._cleanup.append(cls.disk_offering) + + cls.services["small"]["displayname"] = "test🎉vm🙂" + cls.virtual_machine = VirtualMachine.create( + cls.apiclient, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + mode=cls.services['mode'] + ) + + @classmethod + def tearDownClass(cls): + super(TestResourceNames, cls).tearDownClass() + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + def tearDown(self): + super(TestResourceNames, self).tearDown() + + @attr(tags=["advanced", "smoke", "basic"], required_hardware="false") + def test_01_deploy_vm(self): + """Test for deploy virtual machine + """ + # Validate the following: + # 1. 
listVirtualMachines returns accurate information, and check name + list_vm_response = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine.id + ) + + self.debug( + "Verify listVirtualMachines response for virtual machine: %s" \ + % self.virtual_machine.id + ) + self.assertEqual( + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines" + ) + + vm_response = list_vm_response[0] + self.assertEqual( + vm_response.id, + self.virtual_machine.id, + "Check virtual machine id in listVirtualMachines" + ) + self.assertEqual( + vm_response.name, + self.virtual_machine.name, + "Check virtual machine name in listVirtualMachines" + ) + self.assertEqual( + vm_response.displayname, + self.virtual_machine.displayname, + "Check virtual machine display name in listVirtualMachines" + ) + self.assertEqual( + vm_response.state, + 'Running', + msg="VM is not in Running state" + ) + return + + @attr(tags=["advanced", "smoke", "basic"], required_hardware="true") + def test_02_create_volume(self): + """Test for create volume + """ + # Validate the following: + # 1. 
Create volume and check name + + self.services["diskname"] = "test🎉data🙂volume" + self.volume = Volume.create( + self.apiclient, + self.services, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering.id + ) + # self.cleanup.append(self.volume) + self.virtual_machine.attach_volume(self.apiclient, self.volume) + list_volume_response = Volume.list( + self.apiclient, + id=self.volume.id + ) + self.assertEqual( + isinstance(list_volume_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + list_volume_response, + None, + "Check if volume exists in ListVolumes" + ) + + volume_response = list_volume_response[0] + self.assertNotEqual( + volume_response.virtualmachineid, + None, + "Check if volume state (attached) is reflected" + ) + self.assertEqual( + volume_response.name, + self.volume.name, + "Check virtual machine display name in listVirtualMachines" + ) + + @attr(tags=["advanced", "smoke", "basic"], required_hardware="true") + def test_03_register_template(self): + """Test for register template + """ + # Validate the following: + # 1. 
Register template and check name + + if self.hypervisor.lower() in ["lxc"]: + self.skipTest("Skipping test, unsupported hypervisor %s" % self.hypervisor) + + builtin_info = get_builtin_template_info(self.apiclient, self.zone.id) + self.services["template_2"]["url"] = builtin_info[0] + self.services["template_2"]["hypervisor"] = builtin_info[1] + self.services["template_2"]["format"] = builtin_info[2] + self.services["template_2"]["name"] = "test🎉tmpl🙂" + self.services["template_2"]["displaytext"] = "test🎉tmpl🙂" + + template = Template.register(self.apiclient, + self.services["template_2"], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid + ) + self.debug("Successfully registered template with ID: %s" % template.id) + self.cleanup.append(template) + + # Get template response + timeout = 600 + list_template_response = None + while timeout >= 0: + list_template_response = Template.list(self.apiclient, + templatefilter=self.services["template_2"]["templatefilter"], + id=template.id) + + if list_template_response is not None and list_template_response[0].isready: + break + + time.sleep(30) + timeout -= 30 + + template_response = list_template_response[0] + self.assertEqual( + template_response.displaytext, + template.displaytext, + "Check template displaytext in response" + ) + + @attr(tags=["advanced", "smoke", "basic"], required_hardware="true") + def test_04_register_iso(self): + """Test for register ISO + """ + # Validate the following: + # 1. 
Register ISO and check name + + if self.hypervisor.lower() in ["lxc"]: + self.skipTest("Skipping test, unsupported hypervisor %s" % self.hypervisor) + + self.services["iso1"]["displaytext"] = "test🎉iso🙂" + self.services["iso1"]["name"] = "test🎉iso🙂" + iso = Iso.create( + self.apiclient, + self.services["iso1"], + account=self.account.name, + domainid=self.account.domainid + ) + self.debug("Successfully registered ISO with ID: %s" % iso.id) + self.cleanup.append(iso) + + # Get ISO response + timeout = 600 + list_iso_response = None + while timeout >= 0: + list_iso_response = Iso.list( + self.apiclient, + isofilter="self", + id=iso.id + ) + + if list_iso_response is not None and list_iso_response[0].isready: + break + + time.sleep(30) + timeout -= 30 + + iso_response = list_iso_response[0] + self.assertEqual( + iso_response.displaytext, + iso.displaytext, + "Check ISO displaytext in response" + ) diff --git a/test/integration/smoke/test_scale_vm.py b/test/integration/smoke/test_scale_vm.py index 7f8b65b8465..c2ae85df3b2 100644 --- a/test/integration/smoke/test_scale_vm.py +++ b/test/integration/smoke/test_scale_vm.py @@ -221,7 +221,7 @@ class TestScaleVm(cloudstackTestCase): result = str( sshClient.execute("service vmware-tools status")).lower() self.debug("and result is: %s" % result) - if not "running" in result: + if "running" not in result: self.skipTest("Skipping scale VM operation because\ VMware tools are not installed on the VM") res = None @@ -355,7 +355,7 @@ class TestScaleVm(cloudstackTestCase): result = str( sshClient.execute("service vmware-tools status")).lower() self.debug("and result is: %s" % result) - if not "running" in result: + if "running" not in result: self.skipTest("Skipping scale VM operation because\ VMware tools are not installed on the VM") @@ -464,7 +464,7 @@ class TestScaleVm(cloudstackTestCase): result = str( sshClient.execute("service vmware-tools status")).lower() self.debug("and result is: %s" % result) - if not "running" in result: 
+ if "running" not in result: self.skipTest("Skipping scale VM operation because\ VMware tools are not installed on the VM") @@ -555,7 +555,7 @@ class TestScaleVm(cloudstackTestCase): result = str( sshClient.execute("service vmware-tools status")).lower() self.debug("and result is: %s" % result) - if not "running" in result: + if "running" not in result: self.skipTest("Skipping scale VM operation because\ VMware tools are not installed on the VM") res = None @@ -700,7 +700,7 @@ class TestScaleVm(cloudstackTestCase): result = str( sshClient.execute("service vmware-tools status")).lower() self.debug("and result is: %s" % result) - if not "running" in result: + if "running" not in result: self.skipTest("Skipping scale VM operation because\ VMware tools are not installed on the VM") res = None diff --git a/test/integration/smoke/test_service_offerings.py b/test/integration/smoke/test_service_offerings.py index 62e39e195c2..c6a14a64471 100644 --- a/test/integration/smoke/test_service_offerings.py +++ b/test/integration/smoke/test_service_offerings.py @@ -1043,7 +1043,7 @@ class TestCpuCapServiceOfferings(cloudstackTestCase): #Get host CPU usage from top command before and after VM consuming 100% CPU find_pid_cmd = "ps -ax | grep '%s' | head -1 | awk '{print $1}'" % self.vm.id pid = ssh_host.execute(find_pid_cmd)[0] - cpu_usage_cmd = "top -b n 1 p %s | tail -1 | awk '{print $9}'" % pid + cpu_usage_cmd = "top -b -n 1 -p %s | tail -1 | awk '{print $9}'" % pid host_cpu_usage_before_str = ssh_host.execute(cpu_usage_cmd)[0] host_cpu_usage_before = round(float(host_cpu_usage_before_str)) diff --git a/test/integration/smoke/test_sharedfs_lifecycle.py b/test/integration/smoke/test_sharedfs_lifecycle.py new file mode 100644 index 00000000000..f4b2c2fc593 --- /dev/null +++ b/test/integration/smoke/test_sharedfs_lifecycle.py @@ -0,0 +1,277 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Tests for Shared FileSystem +""" +import time + +# Import Local Modules +from nose.plugins.attrib import attr +from marvin.cloudstackAPI import (createFirewallRule, + createPortForwardingRule) +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.lib.utils import (cleanup_resources, + wait_until) +from marvin.lib.base import (Account, + VirtualMachine, + Network, + SharedFS, + ServiceOffering, + NetworkOffering, + DiskOffering, + PublicIPAddress, + ) +from marvin.lib.common import (get_domain, + get_zone, + get_template) +from marvin.codes import FAILED + +from marvin.lib.decoratorGenerators import skipTestIf + + +class TestSharedFSLifecycle(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + + cls.testClient = super(TestSharedFSLifecycle, cls).getClsTestClient() + cls.apiclient = cls.testClient.getApiClient() + cls.services = cls.testClient.getParsedTestDataConfig() + + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) + cls._cleanup = [] + + cls.hypervisor = cls.testClient.getHypervisorInfo() + cls.hypervisorNotSupported = False + if cls.hypervisor.lower() not in ["kvm", "vmware"]: + cls.hypervisorNotSupported = True + return + + cls.services["service_offering"]["name"] = 'FSVM offering'; + 
cls.services["service_offering"]["offerha"] = True; + cls.services["service_offering"]["cpunumber"] = 2; + cls.services["service_offering"]["cpuspeed"] = 500; + cls.services["service_offering"]["memory"] = 1024; + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offering"] + ) + cls._cleanup.append(cls.service_offering) + + cls.services["disk_offering"]["disksize"] = 1; + cls.disk_offering = DiskOffering.create( + cls.apiclient, + cls.services["disk_offering"], + custom=True + ) + cls._cleanup.append(cls.disk_offering) + + cls.useraccount = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.useraccount) + + cls.adminaccount = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id, + admin=True + ) + cls._cleanup.append(cls.adminaccount) + + cls.network_offering_isolated = NetworkOffering.create( + cls.apiclient, + cls.services["isolated_network_offering"] + ) + cls.network_offering_isolated.update(cls.apiclient, state='Enabled') + cls._cleanup.append(cls.network_offering_isolated) + cls.services["network"]["name"] = "Test Network Isolated" + cls.user_network = Network.create( + cls.apiclient, + cls.services["network"], + networkofferingid=cls.network_offering_isolated.id, + domainid=cls.domain.id, + accountid=cls.adminaccount.name, + zoneid=cls.zone.id + ) + cls._cleanup.insert(0, cls.user_network) + cls.public_ipaddress = None + cls.sshpublicport = 1000 + + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"]) + if cls.template == FAILED: + assert False, "get_template() failed to return template with description %s" % cls.services["ostype"] + + cls.services["domainid"] = cls.domain.id + cls.services["zoneid"] = cls.zone.id + cls.services["diskofferingid"] = cls.disk_offering.id + cls.services["serviceofferingid"] = cls.service_offering.id + cls.services["networkid"] = cls.user_network.id + 
cls.services["account"] = cls.adminaccount.name + + cls.sharedfs = SharedFS.create( + cls.apiclient, + cls.services, + name='Test Shared FileSystem 1', + size=2, + filesystem='XFS' + ) + cls._cleanup.insert(0, cls.sharedfs) + + cls.virtual_machine1 = VirtualMachine.create( + cls.apiclient, + cls.services["virtual_machine"], + templateid=cls.template.id, + serviceofferingid=cls.service_offering.id, + networkids=cls.user_network.id, + domainid=cls.domain.id, + accountid=cls.adminaccount.name, + zoneid=cls.zone.id + ) + cls._cleanup.insert(0, cls.virtual_machine1) + + cls.public_ipaddress = cls.setUpSNAT(cls, cls.user_network) + cls.debug("Public ipaddress: " + cls.public_ipaddress.ipaddress) + port = cls.setUpPortForwarding(cls, cls.virtual_machine1.id) + cls.vm1_ssh_client = cls.getSSHClient(cls, cls.virtual_machine1, port) + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiclient, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + return + + def tearDown(self): + try: + self.debug("Cleaning up the resources") + cleanup_resources(self.apiclient, self.cleanup) + self.debug("Cleanup complete!") + except Exception as e: + self.debug("Warning! 
Exception in tearDown: %s" % e) + + def setUpSNAT(self, network): + public_ipaddress = PublicIPAddress.list( + self.apiclient, + account=self.adminaccount.name, + domainid=self.domain.id, + associatednetworkid=network.id + ) + createFwRule = createFirewallRule.createFirewallRuleCmd() + createFwRule.cidrlist = "0.0.0.0/0" + createFwRule.startport = 22 + createFwRule.endport = 22 + createFwRule.ipaddressid = public_ipaddress[0].id + createFwRule.protocol = "tcp" + self.apiclient.createFirewallRule(createFwRule) + return public_ipaddress[0] + + def setUpPortForwarding(self, virtualmachineid): + createPfRule = createPortForwardingRule.createPortForwardingRuleCmd() + self.sshpublicport += 1 + createPfRule.publicport = self.sshpublicport + createPfRule.privateport = 22 + createPfRule.virtualmachineid = virtualmachineid + createPfRule.ipaddressid = self.public_ipaddress.id + createPfRule.protocol = "tcp" + self.apiclient.createPortForwardingRule(createPfRule) + self.debug("Successfully programmed PF rule for :%s"%self.public_ipaddress.ipaddress) + return createPfRule.publicport + + def getSSHClient(self, virtual_machine, port): + try: + ssh_client = virtual_machine.get_ssh_client(ipaddress=self.public_ipaddress.ipaddress, port=port) + except Exception as e: + self.fail("SSH failed for virtual machine: %s - %s" % (virtual_machine.ipaddress, e)) + return ssh_client + + def mountSharedFSOnVM(self, ssh_client, sharedfs): + sharedfs_ip = sharedfs.nic[0].ipaddress + ssh_client.execute("mkdir /mnt/fs1") + cmd = "mount -t nfs -o nolock " + sharedfs_ip + ":/export /mnt/fs1" + ssh_client.execute(cmd) + + @attr( tags=[ "advanced", "advancedns", "smokes"], required_hardware="true") + @skipTestIf("hypervisorNotSupported") + def test_mount_shared_fs(self): + """Mount Shared FileSystem on two VMs and match contents + """ + self.mountSharedFSOnVM(self.vm1_ssh_client, self.sharedfs) + self.vm1_ssh_client.execute("df -Th /mnt/fs1") + self.vm1_ssh_client.execute("touch /mnt/fs1/test") + + 
try: + self.virtual_machine2 = VirtualMachine.create( + self.apiclient, + self.services["virtual_machine"], + templateid=self.template.id, + serviceofferingid=self.service_offering.id, + networkids=self.user_network.id, + domainid=self.domain.id, + accountid=self.adminaccount.name, + zoneid=self.zone.id + ) + except Exception as e: + self.vm1_ssh_client.execute("rm /mnt/fs1/test") + self.fail(e) + + self.cleanup.append(self.virtual_machine2) + + port = self.setUpPortForwarding(self.virtual_machine2.id) + ssh_client = self.getSSHClient(self.virtual_machine2, port) + self.assertIsNotNone(ssh_client) + + self.mountSharedFSOnVM(ssh_client, self.sharedfs) + ssh_client.execute("df -Th /mnt/fs1") + result = ssh_client.execute("ls /mnt/fs1/test") + self.assertEqual(result[0], "/mnt/fs1/test") + + @attr( tags=[ "advanced", "advancedns", "smokes"], required_hardware="true") + @skipTestIf("hypervisorNotSupported") + def test_resize_shared_fs(self): + """Resize the shared filesystem by changing the disk offering and validate + """ + self.mountSharedFSOnVM(self.vm1_ssh_client, self.sharedfs) + result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0] + self.debug(result) + size = result.split()[-5] + self.debug("Size of the filesystem is " + size) + self.assertEqual(size, "2.0G", "SharedFS size should be 2.0G") + + response = SharedFS.stop(self.sharedfs, self.apiclient) + response = SharedFS.changediskoffering(self.sharedfs, self.apiclient, self.disk_offering.id, 3) + self.debug(response) + response = SharedFS.start(self.sharedfs, self.apiclient) + time.sleep(10) + + result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0] + size = result.split()[-5] + self.debug("Size of the filesystem is " + size) + self.assertEqual(size, "3.0G", "SharedFS size should be 3.0G") diff --git a/test/integration/smoke/test_usage.py b/test/integration/smoke/test_usage.py index 2100859ba52..1a6ff37cedb 100644 --- a/test/integration/smoke/test_usage.py +++ 
b/test/integration/smoke/test_usage.py @@ -95,7 +95,7 @@ class Services: "iso": { "displaytext": "Test ISO", "name": "Test ISO", - "url": "http://people.apache.org/~tsp/dummy.iso", + "url": "http://download.cloudstack.org/testing/marvin/dummy.iso", # Source URL where ISO is located "isextractable": True, "isfeatured": True, diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index aaffa63978a..c7c9a01bd32 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -31,6 +31,7 @@ from marvin.cloudstackAPI import (recoverVirtualMachine, from marvin.lib.utils import * from marvin.lib.base import (Account, + Role, ServiceOffering, VirtualMachine, Host, @@ -94,17 +95,21 @@ class TestDeployVM(cloudstackTestCase): cls.services["iso1"]["zoneid"] = cls.zone.id + cls._cleanup = [] + cls.account = Account.create( cls.apiclient, cls.services["account"], domainid=cls.domain.id ) + cls._cleanup.append(cls.account) cls.debug(cls.account.id) cls.service_offering = ServiceOffering.create( cls.apiclient, cls.services["service_offerings"]["tiny"] ) + cls._cleanup.append(cls.service_offering) cls.virtual_machine = VirtualMachine.create( cls.apiclient, @@ -115,17 +120,9 @@ class TestDeployVM(cloudstackTestCase): mode=cls.services['mode'] ) - cls.cleanup = [ - cls.service_offering, - cls.account - ] - @classmethod def tearDownClass(cls): - try: - cleanup_resources(cls.apiclient, cls.cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) + super(TestDeployVM, cls).tearDownClass() def setUp(self): self.apiclient = self.testClient.getApiClient() @@ -262,11 +259,7 @@ class TestDeployVM(cloudstackTestCase): ) def tearDown(self): - try: - # Clean up, terminate the created instance, volumes and snapshots - cleanup_resources(self.apiclient, self.cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) + 
super(TestDeployVM, self).tearDown() class TestVMLifeCycle(cloudstackTestCase): @@ -279,7 +272,7 @@ class TestVMLifeCycle(cloudstackTestCase): cls.hypervisor = testClient.getHypervisorInfo() # Get Zone, Domain and templates - domain = get_domain(cls.apiclient) + cls.domain = get_domain(cls.apiclient) cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) cls.services['mode'] = cls.zone.networktype @@ -309,7 +302,7 @@ class TestVMLifeCycle(cloudstackTestCase): cls.account = Account.create( cls.apiclient, cls.services["account"], - domainid=domain.id + domainid=cls.domain.id ) cls.small_offering = ServiceOffering.create( @@ -362,6 +355,7 @@ class TestVMLifeCycle(cloudstackTestCase): self.cleanup = [] def tearDown(self): + # This should be a super call instead (like tearDownClass), which reverses cleanup order. Kept for now since fixing requires adjusting test 12. try: # Clean up, terminate the created ISOs cleanup_resources(self.apiclient, self.cleanup) @@ -929,7 +923,7 @@ class TestVMLifeCycle(cloudstackTestCase): domainid=self.account.domainid, diskofferingid=custom_disk_offering.id ) - self.cleanup.append(volume) + self.cleanup.append(volume) # Needs adjusting when changing tearDown to a super call, since it will try to delete an attached volume. VirtualMachine.attach_volume(vm, self.apiclient, volume) # Start the VM @@ -955,6 +949,120 @@ class TestVMLifeCycle(cloudstackTestCase): "Check virtual machine is in running state" ) + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_13_destroy_and_expunge_vm(self): + """Test destroy virtual machine with expunge parameter depending on whether the caller's role has expunge permission. + """ + # Setup steps: + # 1. Create role with DENY expunge permission. + # 2. Create account with said role. + # 3. Create a VM of said account. + # 4. Create a VM of cls.account + # Validation steps: + # 1. 
Destroy the VM with the created account and verify it was not destroyed.
+        # 2. Destroy the other VM with cls.account and verify it was expunged.
+
+        role = Role.importRole(
+            self.apiclient,
+            {
+                "name": "MarvinFake Import Role ",
+                "type": "DomainAdmin",
+                "description": "Fake Import Domain Admin Role created by Marvin test",
+                "rules" : [{"rule":"list*", "permission":"allow","description":"Listing apis"},
+                           {"rule":"get*", "permission":"allow","description":"Get apis"},
+                           {"rule":"update*", "permission":"allow","description":"Update apis"},
+                           {"rule":"queryAsyncJobResult", "permission":"allow","description":"Query async job result"},
+                           {"rule":"deployVirtualMachine", "permission":"allow","description":"Deploy virtual machine"},
+                           {"rule":"destroyVirtualMachine", "permission":"allow","description":"Destroy virtual machine"},
+                           {"rule":"expungeVirtualMachine", "permission":"deny","description":"Expunge virtual machine"}]
+            },
+        )
+        self.cleanup.append(role)
+
+        domadm = Account.create(
+            self.apiclient,
+            self.services["account"],
+            admin=True,
+            roleid=role.id,
+            domainid=self.domain.id
+        )
+        self.cleanup[-1]=domadm # Hacky way to reverse cleanup order to avoid deleting the role before account. Remove this line when tearDown is changed to call super().
+        self.cleanup.append(role) # Should be self.cleanup.append(domadm) when tearDown is changed to call super(). 
+
+        domadm_apiclient = self.testClient.getUserApiClient(UserName=domadm.name, DomainName=self.domain.name, type=1)
+
+        vm1 = VirtualMachine.create(
+            self.apiclient,
+            self.services["small"],
+            accountid=self.account.name,
+            domainid=self.account.domainid,
+            serviceofferingid=self.small_offering.id,
+        )
+
+        vm2 = VirtualMachine.create(
+            domadm_apiclient,
+            self.services["small"],
+            accountid=domadm.name,
+            domainid=domadm.domainid,
+            serviceofferingid=self.small_offering.id,
+        )
+
+        self.debug("Expunge VM-ID: %s" % vm1.id)
+
+        cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
+        cmd.id = vm1.id
+        cmd.expunge = True
+        response = self.apiclient.destroyVirtualMachine(cmd)
+
+        self.debug("response: %s" % response)
+        self.debug("response: %s" % response.id)
+        self.assertEqual(
+            response.id,
+            None,
+            "Check if VM was expunged.",
+        )
+
+        self.debug("Expunge VM-ID: %s" % vm2.id)
+
+        cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
+        cmd.id = vm2.id
+        cmd.expunge = True
+        # The caller's role denies expungeVirtualMachine, so destroying with
+        # expunge=True must raise. Put fail() in the try's else branch so it
+        # cannot be swallowed by the except clause. (Previously this called
+        # the non-existent self.failed() inside a bare except:, so the
+        # resulting AttributeError was silently caught and the test passed
+        # even when no exception was raised by the API call.)
+        try:
+            domadm_apiclient.destroyVirtualMachine(cmd)
+        except Exception as e:
+            self.debug("Expected exception: %s" % e)
+        else:
+            self.fail("Destroy VM with expunge should have raised an exception.")
+
+        return
+
+    @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
+    def test_14_destroy_vm_delete_protection(self):
+        """Test destroy Virtual Machine with delete protection
+        """
+
+        # Validate the following
+        # 1. Should not be able to delete the VM when delete protection is enabled
+        # 2. 
Should be able to delete the VM after disabling delete protection + + vm = VirtualMachine.create( + self.apiclient, + self.services["small"], + serviceofferingid=self.small_offering.id, + mode=self.services["mode"], + startvm=False + ) + + vm.update(self.apiclient, deleteprotection=True) + try: + vm.delete(self.apiclient) + self.fail("VM shouldn't get deleted with delete protection enabled") + except Exception as e: + self.debug("Expected exception: %s" % e) + + vm.update(self.apiclient, deleteprotection=False) + vm.delete(self.apiclient) + + return class TestSecuredVmMigration(cloudstackTestCase): diff --git a/test/integration/smoke/test_vm_strict_host_tags.py b/test/integration/smoke/test_vm_strict_host_tags.py new file mode 100644 index 00000000000..163869c8fae --- /dev/null +++ b/test/integration/smoke/test_vm_strict_host_tags.py @@ -0,0 +1,552 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
class TestVMDeploymentPlannerStrictTags(cloudstackTestCase):
    """Deployment-planner tests for the vm.strict.host.tags global settings.

    Verifies that VM deployment to a specific host or to any host honours
    (or ignores) host tags depending on vm.strict.host.tags,
    vm.strict.resource.limit.host.tag.check and resource.limit.host.tags.
    """

    @classmethod
    def setUpClass(cls):
        testClient = super(TestVMDeploymentPlannerStrictTags, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.services['mode'] = cls.zone.networktype

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id

        # Create an account, two host-tagged service offerings and two
        # templates tagged t1/t2 so deployments can be steered by tags.
        cls.account = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id)
        cls.service_offering_h1 = ServiceOffering.create(cls.apiclient, cls.services["service_offering_h1"])
        cls.service_offering_h2 = ServiceOffering.create(cls.apiclient, cls.services["service_offering_h2"])

        cls.template_t1 = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()],
                                            zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower(), templatetag="t1")
        cls.template_t1.download(cls.apiclient)

        cls.template_t2 = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()],
                                            zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower(), templatetag="t2")
        cls.template_t2.download(cls.apiclient)

        hosts = Host.list(cls.apiclient, zoneid=cls.zone.id, type='Routing')
        cls.host_h1 = hosts[0] if len(hosts) >= 1 else None

        cls._cleanup = [cls.account, cls.service_offering_h1, cls.service_offering_h2, cls.template_t1, cls.template_t2]

    @classmethod
    def tearDownClass(cls):
        # Restore host tags and global settings touched by the tests.
        if cls.host_h1:
            Host.update(cls.apiclient, id=cls.host_h1.id, hosttags="")
        cls.updateConfiguration("vm.strict.host.tags", "")
        cls.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        cls.updateConfiguration("resource.limit.host.tags", "")
        super(TestVMDeploymentPlannerStrictTags, cls).tearDownClass()

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        # Each test starts from a known host-tag/global-setting baseline.
        if self.host_h1:
            Host.update(self.apiclient, id=self.host_h1.id, hosttags="h1,t1,v1")
        self.updateConfiguration("vm.strict.host.tags", "")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "")
        self.cleanup = []

    def tearDown(self):
        # Expunge VMs from both templates so host tags can be reset cleanly.
        self.cleanup_vm_for_template(self.template_t1.id)
        self.cleanup_vm_for_template(self.template_t2.id)
        super(TestVMDeploymentPlannerStrictTags, self).tearDown()

    def cleanup_vm_for_template(self, templateid):
        """Expunge every VM created from the given template."""
        vm_list = VirtualMachine.list(self.apiclient, listall=True, templateid=templateid)
        if isinstance(vm_list, list):
            for vm in vm_list:
                self.expunge_vm(vm)

    def expunge_vm(self, vm):
        """Best-effort expunge of a VM; failures are logged, not raised."""
        try:
            cmd = expungeVirtualMachine.expungeVirtualMachineCmd()
            cmd.id = vm.id
            self.apiclient.expungeVirtualMachine(cmd)
        except Exception as e:
            self.debug("Failed to expunge VM: %s" % e)

    @classmethod
    def updateConfiguration(cls, name, value):
        """Set a global configuration value (usable via cls or an instance)."""
        cmd = updateConfiguration.updateConfigurationCmd()
        cmd.name = name
        cmd.value = value
        cls.apiclient.updateConfiguration(cmd)

    def deploy_vm(self, destination_id, template_id, service_offering_id):
        """Deploy a VM; destination_id may be None to let the planner choose."""
        return VirtualMachine.create(self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id,
                                     templateid=template_id, serviceofferingid=service_offering_id,
                                     hostid=destination_id)

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_01_deploy_vm_on_specific_host_without_strict_tags(self):
        """Deploy on a chosen host with strict tags disabled — must land there."""
        self.updateConfiguration("vm.strict.host.tags", "")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_02_deploy_vm_on_any_host_without_strict_tags(self):
        """Deploy without a host preference with strict tags disabled."""
        self.updateConfiguration("vm.strict.host.tags", "")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "")

        vm = self.deploy_vm(None, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertIsNotNone(vm, "VM instance was not deployed")

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_03_deploy_vm_on_specific_host_with_strict_tags_success(self):
        """Deploy on a chosen host whose tags satisfy the strict-tag checks."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "false")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")

        # Re-deploy with the resource-limit tag check also enforced.
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_04_deploy_vm_on_any_host_with_strict_tags_success(self):
        """Deploy with no host preference when only the tagged host matches."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "false")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        vm = self.deploy_vm(None, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertIsNotNone(vm, "VM instance was not deployed")

        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")

        vm = self.deploy_vm(None, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_05_deploy_vm_on_specific_host_with_strict_tags_failure(self):
        """Deploying a t2-tagged template on a t1-tagged host must be rejected."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        try:
            vm = self.deploy_vm(self.host_h1.id, self.template_t2.id, self.service_offering_h1.id)
            self.cleanup.append(vm)
            self.fail("VM should not be deployed")
        except Exception as e:
            # An unexpected success raises AssertionError above, whose message
            # does not contain this substring, so the test still fails.
            self.assertTrue("Cannot deploy VM, destination host" in str(e))

        try:
            vm = self.deploy_vm(self.host_h1.id, self.template_t2.id, self.service_offering_h2.id)
            self.cleanup.append(vm)
            self.fail("VM should not be deployed")
        except Exception as e:
            self.assertTrue("Cannot deploy VM, destination host" in str(e))

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_06_deploy_vm_on_any_host_with_strict_tags_failure(self):
        """With no matching host at all, the planner must find no suitable host."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        try:
            vm = self.deploy_vm(None, self.template_t2.id, self.service_offering_h1.id)
            self.cleanup.append(vm)
            self.fail("VM should not be deployed")
        except Exception as e:
            self.assertTrue("No suitable host found for vm " in str(e))
class TestScaleVMStrictTags(cloudstackTestCase):
    """Tests that scaling a VM between service offerings honours strict host tags."""

    @classmethod
    def setUpClass(cls):
        testClient = super(TestScaleVMStrictTags, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.services['mode'] = cls.zone.networktype

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id

        # Create an account, two host-tagged service offerings and two
        # templates tagged t1/t2.
        cls.account = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id)
        cls.service_offering_h1 = ServiceOffering.create(cls.apiclient, cls.services["service_offering_h1"])
        cls.service_offering_h2 = ServiceOffering.create(cls.apiclient, cls.services["service_offering_h2"])

        cls.template_t1 = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()],
                                            zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower(), templatetag="t1")
        cls.template_t1.download(cls.apiclient)

        cls.template_t2 = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()],
                                            zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower(), templatetag="t2")
        cls.template_t2.download(cls.apiclient)

        hosts = Host.list(cls.apiclient, zoneid=cls.zone.id, type='Routing')
        cls.host_h1 = hosts[0] if len(hosts) >= 1 else None

        cls._cleanup = [cls.account, cls.service_offering_h1, cls.service_offering_h2, cls.template_t1, cls.template_t2]

    @classmethod
    def tearDownClass(cls):
        # Restore host tags and global settings touched by the tests.
        if cls.host_h1:
            Host.update(cls.apiclient, id=cls.host_h1.id, hosttags="")
        cls.updateConfiguration("vm.strict.host.tags", "")
        cls.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        cls.updateConfiguration("resource.limit.host.tags", "")
        super(TestScaleVMStrictTags, cls).tearDownClass()

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        # The host carries both offerings' tags so scaling can succeed in test_01.
        if self.host_h1:
            Host.update(self.apiclient, id=self.host_h1.id, hosttags="h1,t1,v1,h2,t2,v2")
        self.updateConfiguration("vm.strict.host.tags", "")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "")
        self.cleanup = []

    def tearDown(self):
        self.cleanup_vm_for_template(self.template_t1.id)
        self.cleanup_vm_for_template(self.template_t2.id)
        super(TestScaleVMStrictTags, self).tearDown()

    def cleanup_vm_for_template(self, templateid):
        """Expunge every VM created from the given template."""
        vm_list = VirtualMachine.list(self.apiclient, listall=True, templateid=templateid)
        if isinstance(vm_list, list):
            for vm in vm_list:
                self.expunge_vm(vm)

    def expunge_vm(self, vm):
        """Best-effort expunge of a VM; failures are logged, not raised."""
        try:
            cmd = expungeVirtualMachine.expungeVirtualMachineCmd()
            cmd.id = vm.id
            self.apiclient.expungeVirtualMachine(cmd)
        except Exception as e:
            self.debug("Failed to expunge VM: %s" % e)

    @classmethod
    def updateConfiguration(cls, name, value):
        """Set a global configuration value (usable via cls or an instance)."""
        cmd = updateConfiguration.updateConfigurationCmd()
        cmd.name = name
        cmd.value = value
        cls.apiclient.updateConfiguration(cmd)

    def deploy_vm(self, destination_id, template_id, service_offering_id):
        """Deploy a VM; destination_id may be None to let the planner choose."""
        return VirtualMachine.create(self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id,
                                     templateid=template_id, serviceofferingid=service_offering_id,
                                     hostid=destination_id)

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_01_scale_vm_strict_tags_success(self):
        """Scaling to the h2 offering succeeds when the host has all tags."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")
        vm.stop(self.apiclient)
        vm.scale(self.apiclient, serviceOfferingId=self.service_offering_h2.id)
        vm.start(self.apiclient)
        scaled_vm = VirtualMachine.list(self.apiclient, id=vm.id, listall=True)[0]
        self.assertEqual(scaled_vm.serviceofferingid, self.service_offering_h2.id, "VM was not scaled")
        self.assertEqual(self.host_h1.id, scaled_vm.hostid, "VM was not scaled")

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_02_scale_vm_strict_tags_failure(self):
        """Scaling fails to start when no host carries the target offering's tags."""
        # Restrict the host to h1-only tags so the h2 offering cannot match.
        if self.host_h1:
            Host.update(self.apiclient, id=self.host_h1.id, hosttags="h1,t1,v1")

        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)

        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")
        try:
            vm.stop(self.apiclient)
            vm.scale(self.apiclient, serviceOfferingId=self.service_offering_h2.id)
            vm.start(self.apiclient)
            self.fail("VM should not be able to scale and start")
        except Exception as e:
            # An unexpected success raises AssertionError above, whose message
            # does not contain this substring, so the test still fails.
            self.assertTrue("No suitable host found for vm " in str(e))
class TestRestoreVMStrictTags(cloudstackTestCase):
    """Tests that restoring a VM to a differently tagged template honours strict host tags."""

    @classmethod
    def setUpClass(cls):
        testClient = super(TestRestoreVMStrictTags, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.services['mode'] = cls.zone.networktype

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id

        # Create an account, two host-tagged service offerings and two
        # templates tagged t1/t2.
        cls.account = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id)
        cls.service_offering_h1 = ServiceOffering.create(cls.apiclient, cls.services["service_offering_h1"])
        cls.service_offering_h2 = ServiceOffering.create(cls.apiclient, cls.services["service_offering_h2"])

        cls.template_t1 = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()],
                                            zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower(), templatetag="t1")
        cls.template_t1.download(cls.apiclient)

        cls.template_t2 = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()],
                                            zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower(), templatetag="t2")
        cls.template_t2.download(cls.apiclient)

        hosts = Host.list(cls.apiclient, zoneid=cls.zone.id, type='Routing')
        cls.host_h1 = hosts[0] if len(hosts) >= 1 else None

        cls._cleanup = [cls.account, cls.service_offering_h1, cls.service_offering_h2, cls.template_t1, cls.template_t2]

    @classmethod
    def tearDownClass(cls):
        # Restore host tags and global settings touched by the tests.
        if cls.host_h1:
            Host.update(cls.apiclient, id=cls.host_h1.id, hosttags="")
        cls.updateConfiguration("vm.strict.host.tags", "")
        cls.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        cls.updateConfiguration("resource.limit.host.tags", "")
        super(TestRestoreVMStrictTags, cls).tearDownClass()

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        if self.host_h1:
            Host.update(self.apiclient, id=self.host_h1.id, hosttags="h1,t1,v1")
        self.updateConfiguration("vm.strict.host.tags", "")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "")
        self.cleanup = []

    def tearDown(self):
        self.cleanup_vm_for_template(self.template_t1.id)
        self.cleanup_vm_for_template(self.template_t2.id)
        super(TestRestoreVMStrictTags, self).tearDown()

    def cleanup_vm_for_template(self, templateid):
        """Expunge every VM created from the given template."""
        vm_list = VirtualMachine.list(self.apiclient, listall=True, templateid=templateid)
        if isinstance(vm_list, list):
            for vm in vm_list:
                self.expunge_vm(vm)

    def expunge_vm(self, vm):
        """Best-effort expunge of a VM; failures are logged, not raised."""
        try:
            cmd = expungeVirtualMachine.expungeVirtualMachineCmd()
            cmd.id = vm.id
            self.apiclient.expungeVirtualMachine(cmd)
        except Exception as e:
            self.debug("Failed to expunge VM: %s" % e)

    @classmethod
    def updateConfiguration(cls, name, value):
        """Set a global configuration value (usable via cls or an instance)."""
        cmd = updateConfiguration.updateConfigurationCmd()
        cmd.name = name
        cmd.value = value
        cls.apiclient.updateConfiguration(cmd)

    def deploy_vm(self, destination_id, template_id, service_offering_id):
        """Deploy a VM; destination_id may be None to let the planner choose."""
        return VirtualMachine.create(self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id,
                                     templateid=template_id, serviceofferingid=service_offering_id,
                                     hostid=destination_id)

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_01_restore_vm_strict_tags_success(self):
        """Restore to template t2 succeeds when template tags are not limited."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")

        vm.restore(self.apiclient, templateid=self.template_t2.id, expunge=True)
        restored_vm = VirtualMachine.list(self.apiclient, id=vm.id, listall=True)[0]
        self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM was not restored")

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_02_restore_vm_strict_tags_failure(self):
        """Restore to template t2 must fail when the host lacks the t2 tag."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)

        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")
        try:
            vm.restore(self.apiclient, templateid=self.template_t2.id, expunge=True)
            self.fail("VM should not be restored")
        except Exception as e:
            # An unexpected success raises AssertionError above, whose message
            # does not contain this substring, so the test still fails.
            self.assertTrue("No suitable host found for vm " in str(e))
class TestMigrateVMStrictTags(cloudstackTestCase):
    """Tests that live-migrating a VM honours strict host tags.

    Requires at least two Routing hosts in the same cluster; otherwise the
    whole class is skipped.
    """

    @classmethod
    def setUpClass(cls):
        import unittest  # local import: only needed for SkipTest below

        testClient = super(TestMigrateVMStrictTags, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.services['mode'] = cls.zone.networktype

        cls.services["virtual_machine"]["zoneid"] = cls.zone.id

        # Pick two hosts in the same cluster so live migration is possible.
        hosts = Host.list(cls.apiclient, zoneid=cls.zone.id, type='Routing')
        cls.host_h1 = hosts[0] if len(hosts) >= 1 else None
        cls.host_h2 = None
        if len(hosts) >= 2:
            for host in hosts[1:]:
                if host.clusterid == cls.host_h1.clusterid:
                    cls.host_h2 = host
                    break

        if not cls.host_h2:
            # BUG FIX: cls.skipTest("...") called the unbound instance method
            # with the message as `self` (TypeError). Raise SkipTest directly,
            # which is the supported way to skip from setUpClass.
            raise unittest.SkipTest("There are not enough hosts to run this test")

        # Create an account, two host-tagged service offerings and two
        # templates tagged t1/t2.
        cls.account = Account.create(cls.apiclient, cls.services["account"], domainid=cls.domain.id)
        cls.service_offering_h1 = ServiceOffering.create(cls.apiclient, cls.services["service_offering_h1"])
        cls.service_offering_h2 = ServiceOffering.create(cls.apiclient, cls.services["service_offering_h2"])

        cls.template_t1 = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()],
                                            zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower(), templatetag="t1")
        cls.template_t1.download(cls.apiclient)

        cls.template_t2 = Template.register(cls.apiclient, cls.services["test_templates"][cls.hypervisor.lower()],
                                            zoneid=cls.zone.id, hypervisor=cls.hypervisor.lower(), templatetag="t2")
        cls.template_t2.download(cls.apiclient)

        cls._cleanup = [cls.account, cls.service_offering_h1, cls.service_offering_h2, cls.template_t1, cls.template_t2]

    @classmethod
    def tearDownClass(cls):
        # Restore host tags and global settings touched by the tests.
        if cls.host_h1:
            Host.update(cls.apiclient, id=cls.host_h1.id, hosttags="")
        if cls.host_h2:
            Host.update(cls.apiclient, id=cls.host_h2.id, hosttags="")
        cls.updateConfiguration("vm.strict.host.tags", "")
        cls.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        cls.updateConfiguration("resource.limit.host.tags", "")
        super(TestMigrateVMStrictTags, cls).tearDownClass()

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        if self.host_h1:
            Host.update(self.apiclient, id=self.host_h1.id, hosttags="h1,t1,v1")
        self.updateConfiguration("vm.strict.host.tags", "")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "")
        self.cleanup = []

    def tearDown(self):
        self.cleanup_vm_for_template(self.template_t1.id)
        self.cleanup_vm_for_template(self.template_t2.id)
        super(TestMigrateVMStrictTags, self).tearDown()

    def cleanup_vm_for_template(self, templateid):
        """Expunge every VM created from the given template."""
        vm_list = VirtualMachine.list(self.apiclient, listall=True, templateid=templateid)
        if isinstance(vm_list, list):
            for vm in vm_list:
                self.expunge_vm(vm)

    def expunge_vm(self, vm):
        """Best-effort expunge of a VM; failures are logged, not raised."""
        try:
            cmd = expungeVirtualMachine.expungeVirtualMachineCmd()
            cmd.id = vm.id
            self.apiclient.expungeVirtualMachine(cmd)
        except Exception as e:
            self.debug("Failed to expunge VM: %s" % e)

    @classmethod
    def updateConfiguration(cls, name, value):
        """Set a global configuration value (usable via cls or an instance)."""
        cmd = updateConfiguration.updateConfigurationCmd()
        cmd.name = name
        cmd.value = value
        cls.apiclient.updateConfiguration(cmd)

    def deploy_vm(self, destination_id, template_id, service_offering_id):
        """Deploy a VM; destination_id may be None to let the planner choose."""
        return VirtualMachine.create(self.apiclient, self.services["virtual_machine"], zoneid=self.zone.id,
                                     templateid=template_id, serviceofferingid=service_offering_id,
                                     hostid=destination_id)

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_01_migrate_vm_strict_tags_success(self):
        """Migration succeeds when the destination host carries matching tags."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)
        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")
        Host.update(self.apiclient, id=self.host_h2.id, hosttags="h1,t1,v1")
        vm.migrate(self.apiclient, self.host_h2.id)
        migrated_vm = VirtualMachine.list(self.apiclient, id=vm.id, listall=True)[0]
        self.assertEqual(migrated_vm.hostid, self.host_h2.id, "VM was not migrated")

    @attr(tags=["advanced", "advancedns", "ssh", "smoke"], required_hardware="false")
    def test_02_migrate_vm_strict_tags_failure(self):
        """Migration is rejected when the destination host tags do not match."""
        self.updateConfiguration("vm.strict.host.tags", "v1,v2")
        self.updateConfiguration("vm.strict.resource.limit.host.tag.check", "true")
        self.updateConfiguration("resource.limit.host.tags", "h1,h2,t1,t2")

        vm = self.deploy_vm(self.host_h1.id, self.template_t1.id, self.service_offering_h1.id)
        self.cleanup.append(vm)

        self.assertEqual(self.host_h1.id, vm.hostid, "VM instance was not deployed on target host ID")
        Host.update(self.apiclient, id=self.host_h2.id, hosttags="h2,t2,v2")
        try:
            vm.migrate(self.apiclient, self.host_h2.id)
            VirtualMachine.list(self.apiclient, id=vm.id, listall=True)[0]
            self.fail("VM should not be migrated")
        except Exception as e:
            # An unexpected success raises AssertionError above, whose message
            # does not contain this substring, so the test still fails.
            self.assertTrue("Cannot deploy VM, destination host:" in str(e))
7d64a27eaf2..28a029adf70 100644 --- a/test/integration/smoke/test_volumes.py +++ b/test/integration/smoke/test_volumes.py @@ -1038,6 +1038,33 @@ class TestVolumes(cloudstackTestCase): ) return + @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="false") + def test_14_delete_volume_delete_protection(self): + """Delete a Volume with delete protection + + # Validate the following + # 1. delete volume will fail when delete protection is enabled + # 2. delete volume is successful when delete protection is disabled + """ + + volume = Volume.create( + self.apiclient, + self.services, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + diskofferingid=self.disk_offering.id + ) + volume.update(self.apiclient, deleteprotection=True) + try: + volume.delete(self.apiclient) + self.fail("Volume delete should have failed with delete protection enabled") + except Exception as e: + self.debug("Volume delete failed as expected with error: %s" % e) + + volume.update(self.apiclient, deleteprotection=False) + volume.destroy(self.apiclient, expunge=True) + class TestVolumeEncryption(cloudstackTestCase): diff --git a/test/integration/smoke/test_vpc_ipv6.py b/test/integration/smoke/test_vpc_ipv6.py index efec43add7c..bc05334a56e 100644 --- a/test/integration/smoke/test_vpc_ipv6.py +++ b/test/integration/smoke/test_vpc_ipv6.py @@ -520,7 +520,7 @@ class TestIpv6Vpc(cloudstackTestCase): cmd, hypervisor=self.routerDetailsMap[router.id]['hypervisor'] ) - self.assertTrue(type(result) == list and len(result) > 0, + self.assertTrue(type(result) == list, "%s on router %s returned invalid result" % (cmd, router.id)) result = '\n'.join(result) return result diff --git a/test/integration/smoke/test_webhook_delivery.py b/test/integration/smoke/test_webhook_delivery.py new file mode 100644 index 00000000000..00178bd03ff --- /dev/null +++ b/test/integration/smoke/test_webhook_delivery.py @@ -0,0 +1,212 @@ +# Licensed to the Apache Software 
""" BVT tests for webhooks delivery with a basic server
"""
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (Account,
                             Domain,
                             Webhook,
                             SSHKeyPair)
from marvin.lib.common import (get_domain,
                               get_zone)
from marvin.lib.utils import (random_gen)
from marvin.cloudstackException import CloudstackAPIException
from nose.plugins.attrib import attr
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
# Import System modules
import time
import json
import socket
import _thread


_multiprocess_shared_ = True
# Deliveries captured by the local HTTP server, consumed by the tests below.
deliveries_received = []

class WebhookReceiver(BaseHTTPRequestHandler):
    """
    WebhookReceiver class to receive webhook events
    """

    def _set_response(self):
        # Reply 200 so the management server marks the delivery as successful.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_POST(self):
        """Record the delivery and echo a per-event acknowledgement body."""
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        post_data = post_data.decode('utf-8')
        event_id = self.headers.get('X-CS-Event-ID')
        print("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n" %
              (str(self.path), str(self.headers), post_data))
        self._set_response()
        global deliveries_received
        if deliveries_received is None:
            deliveries_received = []
        deliveries_received.append({'event': event_id, 'payload': post_data})
        # BUG FIX: compare against None with `is not`, not `!=` (PEP 8 / E711).
        if event_id is not None:
            self.wfile.write("Event with ID: {} successfully processed!".format(str(event_id)).encode('utf-8'))
        else:
            self.wfile.write("POST request for {}".format(self.path).encode('utf-8'))
class TestWebhookDelivery(cloudstackTestCase):
    """End-to-end webhook delivery test against a local HTTP receiver."""

    @classmethod
    def setUpClass(cls):
        testClient = super(TestWebhookDelivery, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()
        cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__

        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.logger = logging.getLogger('TestWebhookDelivery')
        cls.logger.setLevel(logging.DEBUG)

        # Discover the local IP the management server can reach us on by
        # opening a throwaway UDP socket towards it.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((cls.mgtSvrDetails["mgtSvrIp"], cls.mgtSvrDetails["port"]))
        cls.server_ip = s.getsockname()[0]
        s.close()
        if cls.server_ip == "127.0.0.1":
            # Loopback is not reachable from the management server; fall back
            # to the interface used for external traffic.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            cls.server_ip = s.getsockname()[0]
            s.close()
        # use random port for webhookreceiver server
        # NOTE(review): the port is released before HTTPServer rebinds it, so
        # there is a small race with other processes grabbing it — confirm
        # acceptable for test environments.
        s = socket.socket()
        s.bind(('', 0))
        cls.server_port = s.getsockname()[1]
        s.close()
        cls.webhook_receiver_url = "http://" + cls.server_ip + ":" + str(cls.server_port)
        cls.logger.debug("Running Webhook receiver @ %s" % cls.webhook_receiver_url)

        def startMgmtServer(tname, server):
            # Serve until the socket is closed by tearDownClass.
            cls.logger.debug("Starting WebhookReceiver")
            try:
                server.serve_forever()
            except Exception:
                pass
        cls.server = HTTPServer(('0.0.0.0', cls.server_port), WebhookReceiver)
        _thread.start_new_thread(startMgmtServer, ("webhook-receiver", cls.server,))

        cls._cleanup = []

    @classmethod
    def tearDownClass(cls):
        # BUG FIX: guard with getattr — cls.server is never assigned when
        # setUpClass fails before creating the HTTPServer, and the old
        # `if cls.server` would raise AttributeError and mask the real error.
        if getattr(cls, "server", None):
            cls.server.socket.close()
        global deliveries_received
        deliveries_received = []
        super(TestWebhookDelivery, cls).tearDownClass()

    def setUp(self):
        self.cleanup = []
        self.domain1 = Domain.create(
            self.apiclient,
            self.services["domain"])
        self.cleanup.append(self.domain1)

    def tearDown(self):
        super(TestWebhookDelivery, self).tearDown()

    def popItemFromCleanup(self, item_id):
        """Remove the entry with the given id from self.cleanup, if present."""
        for idx, x in enumerate(self.cleanup):
            if x.id == item_id:
                self.cleanup.pop(idx)
                break

    def createDomainAccount(self, isDomainAdmin=False):
        """Create an account in self.domain1 and a matching user API client."""
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            admin=isDomainAdmin,
            domainid=self.domain1.id)
        self.cleanup.append(self.account)
        self.userapiclient = self.testClient.getUserApiClient(
            UserName=self.account.name,
            DomainName=self.account.domain
        )

    def createWebhook(self, apiclient, scope=None, domainid=None, account=None, payloadurl=None, description=None, sslverification=None, secretkey=None, state=None):
        """Create a webhook pointing at the local receiver (default URL)."""
        name = "Test-" + random_gen()
        if payloadurl is None:
            payloadurl = self.webhook_receiver_url
        self.webhook = Webhook.create(
            apiclient,
            name=name,
            payloadurl=payloadurl,
            description=description,
            scope=scope,
            sslverification=sslverification,
            secretkey=secretkey,
            state=state,
            domainid=domainid,
            account=account
        )
        self.cleanup.append(self.webhook)

    @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false")
    def test_01_webhook_deliveries(self):
        """Trigger an event and verify its webhook delivery reaches the receiver."""
        global deliveries_received
        self.createDomainAccount()
        self.createWebhook(self.userapiclient)
        # Registering a keypair generates an event the webhook must deliver.
        self.keypair = SSHKeyPair.register(
            self.userapiclient,
            name="Test-" + random_gen(),
            publickey="ssh-rsa: e6:9a:1e:b5:98:75:88:5d:56:bc:92:7b:43:48:05:b2"
        )
        self.logger.debug("Registered sshkeypair: %s" % str(self.keypair.__dict__))
        # Give the management server a moment to dispatch the delivery.
        time.sleep(2)
        list_deliveries = self.webhook.list_deliveries(
            self.userapiclient,
            page=1,
            pagesize=20
        )
        self.assertNotEqual(
            list_deliveries,
            None,
            "Check webhook deliveries list"
        )
        self.assertTrue(
            len(list_deliveries) > 0,
            "Check webhook deliveries list length"
        )
        for delivery in list_deliveries:
            self.assertEqual(
                delivery.success,
                True,
                "Check webhook delivery success"
            )
            # Must match the acknowledgement body WebhookReceiver writes.
            self.assertEqual(
                delivery.response,
                ("Event with ID: %s successfully processed!" % delivery.eventid),
                "Check webhook delivery response"
            )
            delivery_matched = False
            for received in deliveries_received:
                if received['event'] == delivery.eventid:
                    self.assertEqual(
                        delivery.payload,
                        received['payload'],
                        "Check webhook delivery payload"
                    )
                    delivery_matched = True
            self.assertTrue(
                delivery_matched,
                "Delivery for %s did not match with server" % delivery.id
            )
+""" BVT tests for webhooks lifecycle functionalities +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (listEvents) +from marvin.lib.base import (Account, + Domain, + Webhook, + SSHKeyPair) +from marvin.lib.common import (get_domain, + get_zone) +from marvin.lib.utils import (random_gen) +from marvin.cloudstackException import CloudstackAPIException +from nose.plugins.attrib import attr +import logging +# Import System modules +import time +from datetime import datetime + + +_multiprocess_shared_ = True +HTTP_PAYLOAD_URL = "http://smee.io/C9LPa7Ei3iB6Qj2" +HTTPS_PAYLOAD_URL = "https://smee.io/C9LPa7Ei3iB6Qj2" + +class TestWebhooks(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestWebhooks, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + + cls._cleanup = [] + cls.logger = logging.getLogger('TestWebhooks') + cls.logger.setLevel(logging.DEBUG) + + @classmethod + def tearDownClass(cls): + super(TestWebhooks, cls).tearDownClass() + + def setUp(self): + self.cleanup = [] + self.domain1 = Domain.create( + self.apiclient, + self.services["domain"]) + self.cleanup.append(self.domain1) + + def tearDown(self): + super(TestWebhooks, self).tearDown() + + def popItemFromCleanup(self, item_id): + for idx, x in enumerate(self.cleanup): + if x.id == item_id: + self.cleanup.pop(idx) + break + + def createDomainAccount(self, isDomainAdmin=False): + self.account = Account.create( + self.apiclient, + self.services["account"], + admin=isDomainAdmin, + domainid=self.domain1.id) + self.cleanup.append(self.account) + self.userapiclient = self.testClient.getUserApiClient( + UserName=self.account.name, + DomainName=self.account.domain + ) + + def 
runWebhookLifecycleTest(self, apiclient, scope=None, domainid=None, account=None, normaluser=None, payloadurl=None, description=None, sslverification=None, secretkey=None, state=None, isdelete=True): + name = "Test-" + random_gen() + if payloadurl is None: + payloadurl = HTTP_PAYLOAD_URL + self.webhook = Webhook.create( + apiclient, + name=name, + payloadurl=payloadurl, + description=description, + scope=scope, + sslverification=sslverification, + secretkey=secretkey, + state=state, + domainid=domainid, + account=account + ) + self.cleanup.append(self.webhook) + self.assertNotEqual( + self.webhook, + None, + "Check webhook created" + ) + webhook_id = self.webhook.id + self.logger.debug("Created webhook: %s" % str(self.webhook.__dict__)) + self.assertEqual( + name, + self.webhook.name, + "Check webhook name" + ) + self.assertEqual( + payloadurl, + self.webhook.payloadurl, + "Check webhook payloadurl" + ) + if state is None: + state = 'Enabled' + self.assertEqual( + state, + self.webhook.state, + "Check webhook state" + ) + if scope is None or normaluser is not None: + scope = 'Local' + self.assertEqual( + scope, + self.webhook.scope, + "Check webhook scope" + ) + if sslverification is None: + sslverification = False + self.assertEqual( + sslverification, + self.webhook.sslverification, + "Check webhook sslverification" + ) + if domainid is not None: + if normaluser is not None: + domainid = normaluser.domainid + self.assertEqual( + domainid, + self.webhook.domainid, + "Check webhook domainid" + ) + if account is not None: + self.assertEqual( + account, + self.webhook.account, + "Check webhook account" + ) + if description is not None: + self.assertEqual( + description, + self.webhook.description, + "Check webhook description" + ) + if secretkey is not None: + self.assertEqual( + secretkey, + self.webhook.secretkey, + "Check webhook secretkey" + ) + list_webhook = Webhook.list( + apiclient, + id=webhook_id + ) + self.assertNotEqual( + list_webhook, + None, + "Check 
webhook list" + ) + self.assertEqual( + len(list_webhook), + 1, + "Check webhook list length" + ) + self.assertEqual( + list_webhook[0].id, + webhook_id, + "Check webhook list item" + ) + if isdelete == False: + return + self.webhook.delete(apiclient) + self.popItemFromCleanup(webhook_id) + list_webhook = Webhook.list( + apiclient, + id=webhook_id + ) + self.assertTrue( + list_webhook is None or len(list_webhook) == 0, + "Check webhook list after delete" + ) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_01_create_webhook_admin_local(self): + self.runWebhookLifecycleTest(self.apiclient) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_02_create_webhook_admin_domain(self): + self.runWebhookLifecycleTest(self.apiclient, 'Domain', self.domain1.id) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_03_create_webhook_admin_global(self): + self.runWebhookLifecycleTest(self.apiclient, 'Global') + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_04_create_webhook_domainadmin_local(self): + self.createDomainAccount(True) + self.runWebhookLifecycleTest(self.userapiclient) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_05_create_webhook_domainadmin_subdomain(self): + self.createDomainAccount(True) + self.domain11 = Domain.create( + self.apiclient, + self.services["domain"], + parentdomainid=self.domain1.id) + self.cleanup.append(self.domain11) + self.runWebhookLifecycleTest(self.userapiclient, 'Domain', self.domain11.id) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_06_create_webhook_domainadmin_global_negative(self): + self.createDomainAccount(True) + try: + 
self.runWebhookLifecycleTest(self.userapiclient, 'Global') + except CloudstackAPIException as e: + self.assertTrue( + "errorText:Scope Global can not be specified for owner" in str(e), + "Check Global scope error check" + ) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_07_create_webhook_user_local(self): + self.createDomainAccount() + self.runWebhookLifecycleTest(self.userapiclient) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_08_create_webhook_user_domain(self): + """For normal user scope will always be Local irrespective of the passed value + """ + self.createDomainAccount() + self.runWebhookLifecycleTest(self.userapiclient, 'Domain', self.domain1.id, normaluser=self.account) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_09_create_webhook_user_gloabl(self): + """For normal user scope will always be Local irrespective of the passed value + """ + self.createDomainAccount() + self.runWebhookLifecycleTest(self.userapiclient, 'Global', normaluser=self.account) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_10_create_webhook_admin_advanced(self): + self.createDomainAccount() + self.runWebhookLifecycleTest( + self.apiclient, + payloadurl=HTTPS_PAYLOAD_URL, + scope="Local", + description="Webhook", + sslverification=True, + secretkey="webhook", + state="Disabled", + domainid=self.domain1.id, + account=self.account.name + ) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_11_update_webhook(self): + self.createDomainAccount() + self.runWebhookLifecycleTest(self.userapiclient, isdelete=False) + description = "Desc-" + random_gen() + secretkey = random_gen() + state = 'Disabled' + updated_webhook = self.webhook.update( + 
self.userapiclient, + description=description, + secretkey=secretkey, + state=state + )['webhook'] + self.assertNotEqual( + updated_webhook, + None, + "Check updated webhook" + ) + self.assertEqual( + description, + updated_webhook.description, + "Check webhook description" + ) + self.assertEqual( + secretkey, + updated_webhook.secretkey, + "Check webhook secretkey" + ) + self.assertEqual( + state, + updated_webhook.state, + "Check webhook state" + ) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_12_list_user_webhook_deliveries(self): + self.createDomainAccount() + self.runWebhookLifecycleTest(self.userapiclient, isdelete=False) + now = datetime.now() # current date and time + start_time = now.strftime("%Y-%m-%d %H:%M:%S") + self.keypair = SSHKeyPair.register( + self.userapiclient, + name="Test-" + random_gen(), + publickey="ssh-rsa: e6:9a:1e:b5:98:75:88:5d:56:bc:92:7b:43:48:05:b2" + ) + self.logger.debug("Registered sshkeypair: %s" % str(self.keypair.__dict__)) + cmd = listEvents.listEventsCmd() + cmd.startdate = start_time + cmd.listall = True + events = self.apiclient.listEvents(cmd) + register_sshkeypair_event_count = 0 + if events is not None: + for event in events: + if event.type == "REGISTER.SSH.KEYPAIR": + register_sshkeypair_event_count = register_sshkeypair_event_count + 1 + time.sleep(5) + list_deliveries = self.webhook.list_deliveries( + self.userapiclient, + page=1, + pagesize=20 + ) + self.assertNotEqual( + list_deliveries, + None, + "Check webhook deliveries list" + ) + self.assertTrue( + len(list_deliveries) > 0, + "Check webhook deliveries list length" + ) + register_sshkeypair_delivery_count = 0 + for delivery in list_deliveries: + if delivery.eventtype == "REGISTER.SSH.KEYPAIR": + register_sshkeypair_delivery_count = register_sshkeypair_delivery_count + 1 + self.assertEqual( + register_sshkeypair_event_count, + register_sshkeypair_delivery_count, + "Check sshkeypair webhook 
deliveries count" + ) + self.webhook.delete_deliveries( + self.userapiclient + ) + list_deliveries = self.webhook.list_deliveries( + self.userapiclient + ) + self.assertTrue( + list_deliveries is None or len(list_deliveries) == 0, + "Check webhook deliveries list after delete" + ) + + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_13_webhook_execute_delivery(self): + self.createDomainAccount() + self.runWebhookLifecycleTest(self.userapiclient, isdelete=False) + payload = "{ \"CloudStack\": \"Integration Test\" }" + delivery = self.webhook.execute_delivery( + self.userapiclient, + payload=payload + ) + self.assertNotEqual( + delivery, + None, + "Check test webhook delivery" + ) + self.assertEqual( + self.webhook.id, + delivery.webhookid, + "Check test webhook delivery webhook" + ) + self.assertEqual( + payload, + delivery.payload, + "Check test webhook delivery payload" + ) + self.assertEqual( + self.webhook.id, + delivery.webhookid, + "Check test webhook delivery webhook" + ) diff --git a/test/integration/testpaths/testpath_vmlc.py b/test/integration/testpaths/testpath_vmlc.py index 0ae70c8e4bd..8384a472a0b 100755 --- a/test/integration/testpaths/testpath_vmlc.py +++ b/test/integration/testpaths/testpath_vmlc.py @@ -338,7 +338,7 @@ class TestPathVMLC(cloudstackTestCase): # 13. Find suitable host for VM to migrate and migrate the VM # 14. Verify VM accessibility on new host """ - if self.hypervisor.lower() in ['hyperv', 'lxc'] and value == VPC_NETWORK: + if self.hypervisor.lower() in ['hyperv', 'lxc'] and value == VPC_NETWORK: self.skipTest("can't be run for {} hypervisor".format(self.hypervisor)) # List created service offering in setUpClass by name diff --git a/test/metadata/adapter.xml b/test/metadata/adapter.xml index 84eeaa0adee..3de39d58d72 100644 --- a/test/metadata/adapter.xml +++ b/test/metadata/adapter.xml @@ -18,9 +18,9 @@ under the License. 
--> - + - + 192.168.131.168 hostip @@ -53,7 +53,7 @@ under the License. 204 globalnetworkid - + 192.168.131.168 @@ -152,19 +152,19 @@ under the License. flatnetworkdiskofferingid - + ../metadata/func/sanity.xml SANITY TEST - + com.cloud.test.regression.SanityTest ../metadata/func/regression.xml REGRESSION TEST - + ../metadata/func/templatedwnldstress.xml TEMPLATE DOWNLOAD STRESS TEST @@ -173,8 +173,8 @@ under the License. - - + + @@ -272,7 +272,7 @@ under the License. - + - + diff --git a/test/metadata/delegatedAdmin/delegated_admin_cleanup.xml b/test/metadata/delegatedAdmin/delegated_admin_cleanup.xml index 734fe667b4b..ac0f390ba90 100644 --- a/test/metadata/delegatedAdmin/delegated_admin_cleanup.xml +++ b/test/metadata/delegatedAdmin/delegated_admin_cleanup.xml @@ -17,7 +17,7 @@ specific language governing permissions and limitations under the License. --> - + registerUserKeys @@ -37,7 +37,7 @@ under the License. secretkey - + listDomains @@ -55,8 +55,8 @@ under the License. - - + + listDomains true @@ -73,7 +73,7 @@ under the License. - + deleteUser @@ -83,7 +83,7 @@ under the License. - + deleteUser @@ -93,7 +93,7 @@ under the License. - + deleteDomain @@ -123,4 +123,4 @@ under the License. - + diff --git a/test/metadata/delegatedAdmin/delegated_admin_createusers.xml b/test/metadata/delegatedAdmin/delegated_admin_createusers.xml index 8ea7dd81696..27ce77f6cf7 100644 --- a/test/metadata/delegatedAdmin/delegated_admin_createusers.xml +++ b/test/metadata/delegatedAdmin/delegated_admin_createusers.xml @@ -36,7 +36,7 @@ under the License. - + createDomain @@ -51,8 +51,8 @@ under the License. level1domain2id - - + + createDomain @@ -72,7 +72,7 @@ under the License. - + createDomain @@ -173,8 +173,8 @@ under the License. - - + + createUser @@ -218,8 +218,8 @@ under the License. - - + + createUser @@ -262,8 +262,8 @@ under the License. domain12adminaccount - - + + createUser @@ -307,6 +307,6 @@ under the License. 
- + diff --git a/test/metadata/delegatedAdmin/delegated_admin_verify_part1.xml b/test/metadata/delegatedAdmin/delegated_admin_verify_part1.xml index 60815928df6..1ce18187d52 100644 --- a/test/metadata/delegatedAdmin/delegated_admin_verify_part1.xml +++ b/test/metadata/delegatedAdmin/delegated_admin_verify_part1.xml @@ -36,7 +36,7 @@ under the License. secretkey - + @@ -58,7 +58,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -67,8 +67,8 @@ under the License. - - + + createVolume @@ -84,7 +84,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -92,9 +92,9 @@ under the License. volumeid - - - + + + createTemplate @@ -123,8 +123,8 @@ under the License. - - + + registerTemplate Register template @@ -159,9 +159,9 @@ under the License. registeredtemplateid - - - + + + registerIso @@ -193,8 +193,8 @@ under the License. isoid - - + + createSnapshot @@ -211,8 +211,8 @@ under the License. snapshotid - - + + createSnapshotPolicy true @@ -244,8 +244,8 @@ under the License. snapshotpolicyid - - + + @@ -258,14 +258,14 @@ under the License. globalzoneid - + ipaddress ipaddress - + @@ -287,8 +287,8 @@ under the License. pfgroupid - - + + createPortForwardingService Create port forwarding service @@ -308,8 +308,8 @@ under the License. pfgroupid1 - - + + createPortForwardingServiceRule Create port forwarding service rule @@ -334,7 +334,7 @@ under the License. pfruleid - + @@ -367,9 +367,9 @@ under the License. lbid - - - + + + createLoadBalancerRule Create load balancer rule @@ -399,10 +399,10 @@ under the License. lbid1 - - + + - assignToLoadBalancerRule + assignToLoadBalancerRule Assign to load balancer true @@ -415,6 +415,6 @@ under the License. 
vmid - - - + + + diff --git a/test/metadata/delegatedAdmin/delegated_admin_verify_part2.xml b/test/metadata/delegatedAdmin/delegated_admin_verify_part2.xml index e36f27887d3..9f875054439 100644 --- a/test/metadata/delegatedAdmin/delegated_admin_verify_part2.xml +++ b/test/metadata/delegatedAdmin/delegated_admin_verify_part2.xml @@ -37,9 +37,9 @@ under the License. - - - + + + rebootVirtualMachine Reboot virtual machine @@ -51,7 +51,7 @@ under the License. - + startVirtualMachine Start virtual machine @@ -63,7 +63,7 @@ under the License. - + stopVirtualMachine Stop virtual machine @@ -75,7 +75,7 @@ under the License. - + resetPasswordForVirtualMachine Reset password for virtual machine @@ -87,7 +87,7 @@ under the License. - + changeServiceForVirtualMachine Change service offering for virtual machine @@ -103,7 +103,7 @@ under the License. - + updateVirtualMachine Update virtual machine @@ -119,7 +119,7 @@ under the License. - + listVirtualMachines List virtual machines @@ -131,7 +131,7 @@ under the License. - + deployVirtualMachine true @@ -151,7 +151,7 @@ under the License. diskofferingid globaldiskofferingid - + domainid domainid1 @@ -162,8 +162,8 @@ under the License. - - + + listVolumes @@ -175,8 +175,8 @@ under the License. volumeid - - + + attachVolume Attach volume @@ -192,7 +192,7 @@ under the License. - + detachVolume Detach volume @@ -204,7 +204,7 @@ under the License. - + createVolume Creating volume @@ -220,7 +220,7 @@ under the License. diskofferingid globaldiskofferingid - + domainid domainid1 @@ -230,8 +230,8 @@ under the License. account1 - - + + @@ -245,7 +245,7 @@ under the License. - + createSnapshot Creating snapshot @@ -265,7 +265,7 @@ under the License. - + listSnapshotPolicies Listing snapshot policies @@ -277,7 +277,7 @@ under the License. - + createSnapshotPolicy true @@ -312,7 +312,7 @@ under the License. - + updateTemplate @@ -329,7 +329,7 @@ under the License. 
- + updateTemplatePermissions Update template permissions @@ -345,7 +345,7 @@ under the License. - + copyTemplate Copy template @@ -365,7 +365,7 @@ under the License. - + listTemplates List templates @@ -377,7 +377,7 @@ under the License. - + listTemplatePermissions List template permissions @@ -389,7 +389,7 @@ under the License. - + createTemplate Create template @@ -419,9 +419,9 @@ under the License. account1 - - - + + + updateTemplate @@ -438,7 +438,7 @@ under the License. - + updateTemplatePermissions Update template permissions @@ -454,7 +454,7 @@ under the License. - + copyTemplate Copy template @@ -474,7 +474,7 @@ under the License. - + listTemplates List templates @@ -486,7 +486,7 @@ under the License. - + listTemplatePermissions List template permissions @@ -498,7 +498,7 @@ under the License. - + @@ -517,7 +517,7 @@ under the License. - + listIsos List isos @@ -529,7 +529,7 @@ under the License. - + copyIso Copy iso @@ -548,8 +548,8 @@ under the License. globalzoneid1 - - + + attachIso true @@ -564,8 +564,8 @@ under the License. isoid - - + + detachIso true @@ -576,12 +576,12 @@ under the License. vmid - - + + listPublicIpAddresses - true + true List public ip addresses @@ -590,7 +590,7 @@ under the License. - + associateIpAddress true @@ -610,7 +610,7 @@ under the License. - + @@ -623,8 +623,8 @@ under the License. pfgroupid - - + + listPortForwardingServiceRules List port forwarding service rule @@ -635,8 +635,8 @@ under the License. pfgroupid - - + + listPortForwardingServicesByVm List port forwarding services by vm @@ -647,7 +647,7 @@ under the License. vmid - + removePortForwardingService @@ -667,8 +667,8 @@ under the License. ipaddress - - + + assignPortForwardingService true @@ -687,8 +687,8 @@ under the License. ipaddress - - + + createPortForwardingService Create port forwarding service @@ -710,8 +710,8 @@ under the License. account1 - - + + createPortForwardingServiceRule Create port forwarding service rule @@ -730,8 +730,8 @@ under the License. 
pfgroupid - - + + deletePortForwardingServiceRule Delete port forwarding service rule @@ -743,8 +743,8 @@ under the License. - - + + deletePortForwardingService true @@ -756,8 +756,8 @@ under the License. - - + + listLoadBalancerRules @@ -769,8 +769,8 @@ under the License. lbid - - + + listLoadBalancerRuleInstances List load balancer rule instances @@ -781,8 +781,8 @@ under the License. lbid - - + + removeFromLoadBalancerRule Remove from load balancer @@ -797,8 +797,8 @@ under the License. vmid - - + + assignToLoadBalancerRule Assign to load balancer @@ -813,8 +813,8 @@ under the License. vmid - - + + deleteLoadBalancerRule Delete load balancer rule @@ -825,8 +825,8 @@ under the License. lbid - - + + createLoadBalancerRule Create load balancer rule @@ -850,13 +850,13 @@ under the License. roundrobin - - - + + + disassociateIpAddress - true + true Disassociate ip address @@ -876,7 +876,7 @@ under the License. - + deleteSnapshotPolicies Deleting snapshot policy @@ -888,7 +888,7 @@ under the License. - + deleteTemplate Delete template @@ -900,7 +900,7 @@ under the License. - + deleteTemplate Delete template @@ -912,7 +912,7 @@ under the License. - + deleteVolume Deleting volume @@ -924,7 +924,7 @@ under the License. - + deleteIso Delete iso @@ -935,8 +935,8 @@ under the License. isoid - - + + destroyVirtualMachine Destroy virtual machine @@ -947,6 +947,6 @@ under the License. vmid - + - + diff --git a/test/metadata/delegatedAdmin/pickuser_domainlevel1_domainlevel2.xml b/test/metadata/delegatedAdmin/pickuser_domainlevel1_domainlevel2.xml index 42eb498ec35..6b66d9aad33 100644 --- a/test/metadata/delegatedAdmin/pickuser_domainlevel1_domainlevel2.xml +++ b/test/metadata/delegatedAdmin/pickuser_domainlevel1_domainlevel2.xml @@ -47,8 +47,8 @@ under the License. account1 - - + + listDomains @@ -67,10 +67,10 @@ under the License. domainlevel1 - - + - + + listUsers @@ -98,7 +98,7 @@ under the License. - + listDomains @@ -118,5 +118,5 @@ under the License. 
- + diff --git a/test/metadata/delegatedAdmin/pickuser_domainlevel1admin_domainlevel1admin.xml b/test/metadata/delegatedAdmin/pickuser_domainlevel1admin_domainlevel1admin.xml index b0cd1614fd0..f283affe292 100644 --- a/test/metadata/delegatedAdmin/pickuser_domainlevel1admin_domainlevel1admin.xml +++ b/test/metadata/delegatedAdmin/pickuser_domainlevel1admin_domainlevel1admin.xml @@ -47,8 +47,8 @@ under the License. account1 - - + + listDomains @@ -67,10 +67,10 @@ under the License. domainlevel1 - - + - + + listUsers @@ -98,7 +98,7 @@ under the License. - + listDomains @@ -118,5 +118,5 @@ under the License. - + diff --git a/test/metadata/delegatedAdmin/pickuser_domainlevel1admin_rootadmin.xml b/test/metadata/delegatedAdmin/pickuser_domainlevel1admin_rootadmin.xml index 87cb933bd00..29c4a5ac01e 100644 --- a/test/metadata/delegatedAdmin/pickuser_domainlevel1admin_rootadmin.xml +++ b/test/metadata/delegatedAdmin/pickuser_domainlevel1admin_rootadmin.xml @@ -47,8 +47,8 @@ under the License. account1 - - + + listDomains @@ -67,10 +67,10 @@ under the License. domainlevel1 - - + - + + listUsers @@ -98,7 +98,7 @@ under the License. - + listDomains @@ -118,5 +118,5 @@ under the License. - + diff --git a/test/metadata/delegatedAdmin/pickuser_domainlevel2_child_domainlevel1.xml b/test/metadata/delegatedAdmin/pickuser_domainlevel2_child_domainlevel1.xml index 5e77ca95932..662fe0f25c1 100644 --- a/test/metadata/delegatedAdmin/pickuser_domainlevel2_child_domainlevel1.xml +++ b/test/metadata/delegatedAdmin/pickuser_domainlevel2_child_domainlevel1.xml @@ -47,8 +47,8 @@ under the License. account1 - - + + listDomains @@ -67,10 +67,10 @@ under the License. domainlevel1 - - + - + + listUsers @@ -98,7 +98,7 @@ under the License. - + listDomains @@ -118,5 +118,5 @@ under the License. 
- + diff --git a/test/metadata/delegatedAdmin/pickuser_domainlevel2_nonchild_domainlevel1.xml b/test/metadata/delegatedAdmin/pickuser_domainlevel2_nonchild_domainlevel1.xml index b2192a2f03d..18a7b421343 100644 --- a/test/metadata/delegatedAdmin/pickuser_domainlevel2_nonchild_domainlevel1.xml +++ b/test/metadata/delegatedAdmin/pickuser_domainlevel2_nonchild_domainlevel1.xml @@ -47,8 +47,8 @@ under the License. account1 - - + + listDomains @@ -67,10 +67,10 @@ under the License. domainlevel1 - - + - + + listUsers @@ -98,7 +98,7 @@ under the License. - + listDomains @@ -118,5 +118,5 @@ under the License. - + diff --git a/test/metadata/delegatedAdmin/pickuser_domainlevel2_rootadmin1.xml b/test/metadata/delegatedAdmin/pickuser_domainlevel2_rootadmin1.xml index ecf06af098f..c1b91351cbb 100644 --- a/test/metadata/delegatedAdmin/pickuser_domainlevel2_rootadmin1.xml +++ b/test/metadata/delegatedAdmin/pickuser_domainlevel2_rootadmin1.xml @@ -47,8 +47,8 @@ under the License. account1 - - + + listDomains @@ -67,10 +67,10 @@ under the License. domainlevel1 - - + - + + listUsers @@ -98,7 +98,7 @@ under the License. - + listDomains @@ -118,5 +118,5 @@ under the License. - + diff --git a/test/metadata/delegatedAdmin/pickuser_rootadmin1_rootadmin2.xml b/test/metadata/delegatedAdmin/pickuser_rootadmin1_rootadmin2.xml index d7c7a03d676..aa85730890a 100644 --- a/test/metadata/delegatedAdmin/pickuser_rootadmin1_rootadmin2.xml +++ b/test/metadata/delegatedAdmin/pickuser_rootadmin1_rootadmin2.xml @@ -47,8 +47,8 @@ under the License. account1 - - + + listDomains @@ -67,10 +67,10 @@ under the License. domainlevel1 - - + - + + listUsers @@ -98,7 +98,7 @@ under the License. - + listDomains @@ -118,5 +118,5 @@ under the License. 
- + diff --git a/test/metadata/delegatedAdmin/pickuser_rootadmin_vs_domainlevel1admin.xml b/test/metadata/delegatedAdmin/pickuser_rootadmin_vs_domainlevel1admin.xml index 361b934eee5..79da605f363 100644 --- a/test/metadata/delegatedAdmin/pickuser_rootadmin_vs_domainlevel1admin.xml +++ b/test/metadata/delegatedAdmin/pickuser_rootadmin_vs_domainlevel1admin.xml @@ -47,8 +47,8 @@ under the License. account1 - - + + listDomains @@ -67,10 +67,10 @@ under the License. domainlevel1 - - + - + + listUsers @@ -98,7 +98,7 @@ under the License. - + listDomains @@ -118,5 +118,5 @@ under the License. - + diff --git a/test/metadata/func/directnw_regression.xml b/test/metadata/func/directnw_regression.xml index 944ee3c314f..3cce8b1f869 100644 --- a/test/metadata/func/directnw_regression.xml +++ b/test/metadata/func/directnw_regression.xml @@ -18,10 +18,10 @@ under the License. --> - - + + - + createAccount [Create the Account and a User for the account] @@ -59,7 +59,7 @@ under the License. password - + id @@ -164,11 +164,11 @@ under the License. - + - + listUsers 5 [List the Created User in (3)] @@ -177,7 +177,7 @@ under the License. accountname - + id @@ -222,14 +222,14 @@ under the License. domain ROOT - + - - - + + + - + createUser 3 [Create User Command] @@ -261,9 +261,9 @@ under the License. email nimbus-user@gmail.com - + - + id @@ -312,7 +312,7 @@ under the License. - + @@ -324,7 +324,7 @@ under the License. accountname - + id @@ -429,7 +429,7 @@ under the License. - + @@ -473,7 +473,7 @@ under the License. usersecretkey - + id @@ -534,7 +534,7 @@ under the License. - + @@ -606,11 +606,11 @@ under the License. - + - - + @@ -753,7 +753,7 @@ under the License. - + @@ -825,7 +825,7 @@ under the License. - + @@ -945,7 +945,7 @@ under the License. state locked - + @@ -963,7 +963,7 @@ under the License. domainid 1 - + @@ -1065,7 +1065,7 @@ under the License. state enabled - + @@ -1189,7 +1189,7 @@ under the License. state disabled - + @@ -1207,7 +1207,7 @@ under the License. 
domainid 1 - + @@ -1309,7 +1309,7 @@ under the License. state enabled - + @@ -1325,7 +1325,7 @@ under the License. domainname - + name @@ -1366,7 +1366,7 @@ under the License. domainname - + id @@ -1414,7 +1414,7 @@ under the License. newdomainname - + id @@ -1456,7 +1456,7 @@ under the License. 1 - + domain 0 @@ -1486,8 +1486,8 @@ under the License. - - + + @@ -1503,7 +1503,7 @@ under the License. true - + success @@ -1528,7 +1528,7 @@ under the License. 1 - + template 0 @@ -1586,7 +1586,7 @@ under the License. size - debian50templatesize + debian50templatesize account @@ -1619,7 +1619,7 @@ under the License. domainid 1 - + @@ -1829,7 +1829,7 @@ under the License. offerha false - + @@ -1867,7 +1867,7 @@ under the License. vlan - guestcidraddress + guestcidraddress networktype diff --git a/test/metadata/func/error_events.properties b/test/metadata/func/error_events.properties index 4d440e6981c..e6909b42ec5 100644 --- a/test/metadata/func/error_events.properties +++ b/test/metadata/func/error_events.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/test/metadata/func/expunge.xml b/test/metadata/func/expunge.xml index 72ccb2afb6e..d008dc3319f 100644 --- a/test/metadata/func/expunge.xml +++ b/test/metadata/func/expunge.xml @@ -19,7 +19,7 @@ under the License. - + + --> createAccount [Create the Account and a User for the account] @@ -95,7 +95,7 @@ under the License. accountname - + id @@ -104,7 +104,7 @@ under the License. - - + registerUserKeys Registering the user @@ -205,7 +205,7 @@ under the License. - + deployVirtualMachine true @@ -222,7 +222,7 @@ under the License. 
diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -259,7 +259,7 @@ under the License. - + listVirtualMachines List virtual machines @@ -276,7 +276,7 @@ under the License. - + listHosts List host where vm is running @@ -293,7 +293,7 @@ under the License. - + listRouters List Routers @@ -326,7 +326,7 @@ under the License. - + listHosts List host where domR is running @@ -342,8 +342,8 @@ under the License. routerhost_ipaddress - - + + listVolumes List data disk vm volume @@ -368,7 +368,7 @@ under the License. - + select path from volumes true @@ -386,7 +386,7 @@ under the License. - + listVolumes List root disk vm volume @@ -411,7 +411,7 @@ under the License. - + select path from volumes true @@ -429,7 +429,7 @@ under the License. - + select path from volumes true @@ -463,7 +463,7 @@ under the License. - + listvdi.sh @@ -478,8 +478,8 @@ under the License. vm_rootvolumepath - - + + listvdi.sh @@ -494,7 +494,7 @@ under the License. router_rootvolumepath - + @@ -507,7 +507,7 @@ under the License. - + listConfigurations Getting expunge interval value @@ -524,7 +524,7 @@ under the License. - + sleep.sh @@ -535,9 +535,9 @@ under the License. expunge.interval - - - + + + sleep.sh @@ -548,9 +548,9 @@ under the License. expunge.interval - - - + + + listvdi.sh true @@ -567,7 +567,7 @@ under the License. - + listvdi.sh true @@ -584,8 +584,8 @@ under the License. - - + + createVolume @@ -602,7 +602,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -611,7 +611,7 @@ under the License. - + attachVolume true @@ -627,7 +627,7 @@ under the License. - + detachVolume true @@ -639,7 +639,7 @@ under the License. - + createSnapshot true @@ -661,7 +661,7 @@ under the License. - + select path from volumes true @@ -679,7 +679,7 @@ under the License. - + listvdi.sh @@ -694,8 +694,8 @@ under the License. detached_volumepath - - + + deleteVolume Delete detached volume @@ -707,7 +707,7 @@ under the License. - + sleep.sh @@ -718,9 +718,9 @@ under the License. 
expunge.interval - - - + + + sleep.sh @@ -731,8 +731,8 @@ under the License. expunge.interval - - + + listvdi.sh true @@ -749,7 +749,7 @@ under the License. - + listvdi.sh @@ -765,9 +765,9 @@ under the License. snapshotpath - - - + + + stopRouter @@ -778,8 +778,8 @@ under the License. router_id - - + + deleteUser Deleting the user @@ -789,8 +789,8 @@ under the License. userid - - + + listvdi.sh true @@ -807,7 +807,7 @@ under the License. - + listvdi.sh true @@ -823,10 +823,10 @@ under the License. snapshotpath - - - - + + + + @@ -843,7 +843,7 @@ under the License. - + ms.sh @@ -876,8 +876,8 @@ under the License. - - + + deployVirtualMachine true @@ -894,7 +894,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -927,7 +927,7 @@ under the License. - + listVolumes @@ -947,7 +947,7 @@ under the License. - + stopVirtualMachine Stop virtual machine @@ -958,7 +958,7 @@ under the License. - + createTemplate Create template from root volume @@ -986,8 +986,8 @@ under the License. - - + + deployVirtualMachine Deploying virtual machine from the private template @@ -1003,7 +1003,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid privatetemplateid @@ -1040,7 +1040,7 @@ under the License. - + listHosts List host where vm is running @@ -1057,7 +1057,7 @@ under the License. - + select install_path from template_spool_ref true @@ -1075,7 +1075,7 @@ under the License. - + listvdi.sh @@ -1091,7 +1091,7 @@ under the License. - + destroyVirtualMachine Destroy virtual machine created from the private template @@ -1102,7 +1102,7 @@ under the License. - + sleep.sh @@ -1113,8 +1113,8 @@ under the License. 240 - - + + listvdi.sh true @@ -1144,7 +1144,7 @@ under the License. - + deleteTemplate Delete private template @@ -1155,7 +1155,7 @@ under the License. - + updateConfiguration Updating storage.cleanup.interval @@ -1170,7 +1170,7 @@ under the License. - + ms.sh @@ -1202,6 +1202,6 @@ under the License. 
- - + + diff --git a/test/metadata/func/external_firewall.xml b/test/metadata/func/external_firewall.xml index afb92446bed..d0f7efb7062 100644 --- a/test/metadata/func/external_firewall.xml +++ b/test/metadata/func/external_firewall.xml @@ -61,7 +61,7 @@ under the License. - + createNetwork @@ -99,7 +99,7 @@ under the License. - + deployVirtualMachine deploying virtual machine @@ -144,10 +144,10 @@ under the License. - - - - + + + + associateIpAddress Associating first public ip address @@ -169,14 +169,14 @@ under the License. id nonsourcenatpublicip1id - + ipaddress nonsourcenatpublicip1 - + associateIpAddress Associating second public ip address @@ -198,15 +198,15 @@ under the License. id nonsourcenatpublicip2id - + ipaddress nonsourcenatpublicip2 - - + + enableStaticNat Enable Static NAT - 1 @@ -214,11 +214,11 @@ under the License. ipaddressid nonsourcenatpublicip1id - + virtualmachineid vmid - + @@ -250,7 +250,7 @@ under the License. - + sleep.sh @@ -261,8 +261,8 @@ under the License. 120 - - + + ssh.sh @@ -282,9 +282,9 @@ under the License. - - - + + + createLoadBalancerRule Creating load balancer rule @@ -324,8 +324,8 @@ under the License. lbid - - + + assignToLoadBalancerRule Assigning vm to the load balancer @@ -339,8 +339,8 @@ under the License. vmid - - + + deleteLoadBalancerRule Deleting the load balancer @@ -350,8 +350,8 @@ under the License. lbid - - + + disassociateIpAddress Dissociating first public ip address @@ -362,7 +362,7 @@ under the License. - + disassociateIpAddress Dissociating second public ip address @@ -373,7 +373,7 @@ under the License. - + deleteAccount Delete account @@ -383,7 +383,7 @@ under the License. accountid - - - + + + diff --git a/test/metadata/func/flatnetwork.xml b/test/metadata/func/flatnetwork.xml index 60866537b01..70648b312d3 100644 --- a/test/metadata/func/flatnetwork.xml +++ b/test/metadata/func/flatnetwork.xml @@ -56,7 +56,7 @@ under the License. 
- + registerUserKeys Registering the user @@ -77,7 +77,7 @@ under the License. - + createNetworkGroup true @@ -89,7 +89,7 @@ under the License. - + authorizeNetworkGroupIngress Authorizing network group ingress @@ -124,7 +124,7 @@ under the License. - + deployVirtualMachine true @@ -158,7 +158,7 @@ under the License. - + deployVirtualMachine true @@ -187,8 +187,8 @@ under the License. vmid1 - - + + createVolume true @@ -213,8 +213,8 @@ under the License. - - + + attachVolume true @@ -230,7 +230,7 @@ under the License. - + sleep.sh @@ -241,8 +241,8 @@ under the License. 120 - - + + ssh.sh @@ -258,7 +258,7 @@ under the License. - + sleep.sh @@ -269,8 +269,8 @@ under the License. 60 - - + + stopVirtualMachine true @@ -282,7 +282,7 @@ under the License. - + startVirtualMachine true @@ -294,7 +294,7 @@ under the License. - + rebootVirtualMachine true @@ -318,7 +318,7 @@ under the License. - + attachVolume true @@ -334,8 +334,8 @@ under the License. - - + + stopVirtualMachine true @@ -347,7 +347,7 @@ under the License. - + startVirtualMachine true @@ -359,7 +359,7 @@ under the License. - + rebootVirtualMachine true @@ -383,8 +383,8 @@ under the License. - - + + deleteVolume true @@ -396,7 +396,7 @@ under the License. - + createVolume true @@ -421,8 +421,8 @@ under the License. - - + + attachVolume true @@ -438,8 +438,8 @@ under the License. - - + + revokeNetworkGroupIngress Revoking network group igress @@ -474,8 +474,8 @@ under the License. - - + + ssh.sh @@ -492,7 +492,7 @@ under the License. - + destroyVirtualMachine true @@ -504,7 +504,7 @@ under the License. - + sleep.sh @@ -515,10 +515,10 @@ under the License. 120 - - - - + + + + deleteUser Deleting the user @@ -528,6 +528,6 @@ under the License. userid - + diff --git a/test/metadata/func/ha.xml b/test/metadata/func/ha.xml index fa2ccb4b631..32ad2c22a51 100644 --- a/test/metadata/func/ha.xml +++ b/test/metadata/func/ha.xml @@ -33,7 +33,7 @@ under the License. 
diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -74,7 +74,7 @@ under the License. - + deployVirtualMachine Deploy ha enabled vm @@ -90,7 +90,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -131,8 +131,8 @@ under the License. - - + + updateVirtualMachine Update virtual machine @@ -147,7 +147,7 @@ under the License. - + listHosts @@ -165,7 +165,7 @@ under the License. - + listHosts List host with ha enabled machine @@ -201,8 +201,8 @@ under the License. instance_name - - + + select instance_name from vm_instance true @@ -219,7 +219,7 @@ under the License. instance_name_ha - + @@ -236,8 +236,8 @@ under the License. instance_name - - + + killvm.sh @@ -252,7 +252,7 @@ under the License. instance_name_ha - + @@ -265,7 +265,7 @@ under the License. 300 - + @@ -283,8 +283,8 @@ under the License. Stopped - - + + listVirtualMachines List virtual machine with ha enabled @@ -305,7 +305,7 @@ under the License. - + listHosts List host with ha enabled machine @@ -322,7 +322,7 @@ under the License. - + listvm.sh @@ -337,9 +337,9 @@ under the License. instance_name_ha - - - + + + @@ -352,7 +352,7 @@ under the License. - + destroyVirtualMachine Destroy ha enabled vm as a part of cleanup @@ -363,5 +363,5 @@ under the License. - - + + diff --git a/test/metadata/func/iso.xml b/test/metadata/func/iso.xml index 8e4c1c1c904..aa6d361e96c 100644 --- a/test/metadata/func/iso.xml +++ b/test/metadata/func/iso.xml @@ -37,8 +37,8 @@ under the License. secretkey - - + + deployVirtualMachine true @@ -54,7 +54,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -87,7 +87,7 @@ under the License. - + attachIso true @@ -103,7 +103,7 @@ under the License. - + listVirtualMachines 869-2 @@ -120,10 +120,10 @@ under the License. - - + + - + createAccount [Create the Account and a User for the account] @@ -186,8 +186,8 @@ under the License. 
- - +--> registerUserKeys @@ -249,8 +249,8 @@ under the License. secretkey - - + + attachIso true @@ -267,8 +267,8 @@ under the License. - - + + registerUserKeys @@ -288,8 +288,8 @@ under the License. secretkey - - + + destroyVirtualMachine @@ -299,7 +299,7 @@ under the License. - + attachIso 871 @@ -315,8 +315,8 @@ under the License. - - + + attachIso @@ -333,7 +333,7 @@ under the License. - + @@ -351,7 +351,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -384,7 +384,7 @@ under the License. - + attachIso true @@ -399,7 +399,7 @@ under the License. - + rebootVirtualMachine true @@ -411,7 +411,7 @@ under the License. - + listVirtualMachines 1073-2 @@ -428,7 +428,7 @@ under the License. - + stopVirtualMachine @@ -441,7 +441,7 @@ under the License. - + listVirtualMachines 1074-2 @@ -458,7 +458,7 @@ under the License. - + startVirtualMachine true @@ -470,7 +470,7 @@ under the License. - + listVirtualMachines 1074-4 @@ -487,8 +487,8 @@ under the License. - - + + listRouters @@ -515,8 +515,8 @@ under the License. routerid - - + + attachIso 1075 @@ -532,7 +532,7 @@ under the License. - + detachIso @@ -545,8 +545,8 @@ under the License. - - + + attachIso @@ -562,7 +562,7 @@ under the License. - + stopVirtualMachine true @@ -573,7 +573,7 @@ under the License. - + detachIso true @@ -585,8 +585,8 @@ under the License. - - + + destroyVirtualMachine @@ -598,7 +598,7 @@ under the License. - + deployVirtualMachine true @@ -615,7 +615,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globalisoid @@ -652,7 +652,7 @@ under the License. - + destroyVirtualMachine @@ -664,7 +664,7 @@ under the License. - + deployVirtualMachine true @@ -681,7 +681,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globalisoid @@ -718,7 +718,7 @@ under the License. - + detachIso true @@ -730,7 +730,7 @@ under the License. - + rebootVirtualMachine true @@ -742,7 +742,7 @@ under the License. 
- + listVirtualMachines 1079-4 @@ -759,9 +759,9 @@ under the License. - - - + + + destroyVirtualMachine @@ -789,7 +789,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globalisoid @@ -826,7 +826,7 @@ under the License. - + detachIso true @@ -838,7 +838,7 @@ under the License. - + stopVirtualMachine true @@ -850,7 +850,7 @@ under the License. - + startVirtualMachine true @@ -862,7 +862,7 @@ under the License. - + listVirtualMachines 1080-4 @@ -879,8 +879,8 @@ under the License. - - + + deleteUser @@ -892,7 +892,7 @@ under the License. - + destroyVirtualMachine Destroying vm as a part of cleanup @@ -902,6 +902,6 @@ under the License. vmid - - + + diff --git a/test/metadata/func/loadbalancers.xml b/test/metadata/func/loadbalancers.xml index cd983148c8f..87f2a0f9899 100644 --- a/test/metadata/func/loadbalancers.xml +++ b/test/metadata/func/loadbalancers.xml @@ -34,7 +34,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -58,7 +58,7 @@ under the License. networkids globalnetworkid - + @@ -67,7 +67,7 @@ under the License. - + associateIpAddress [Acquire a non-Source NAT IP Address for the Admin Account] @@ -85,14 +85,14 @@ under the License. 1 - + ipaddress nonsourcenatpublicip - + createLoadBalancerRule [Create Load Balancing Rule -ve test (Private Port 0)] @@ -127,8 +127,8 @@ under the License. 1 - - + + createLoadBalancerRule [Create Load Balancing Rule -ve test (Private Port 65536)] @@ -163,8 +163,8 @@ under the License. 1 - - + + createLoadBalancerRule [Create Load Balancing Rule -ve test (Private Port 65536)] @@ -199,8 +199,8 @@ under the License. 1 - - + + createLoadBalancerRule [Create Load Balancing Rule -ve test (Public Port 65536)] @@ -235,8 +235,8 @@ under the License. 1 - - + + createLoadBalancerRule @@ -270,8 +270,8 @@ under the License. 1 - - + + createLoadBalancerRule 919-1 [Create Load Balancing Rule - 2] @@ -304,8 +304,8 @@ under the License. 
1 - - + + createLoadBalancerRule 919-2 [Create Load Balancing Rule - 3] @@ -338,8 +338,8 @@ under the License. 1 - - + + createLoadBalancerRule true @@ -373,7 +373,7 @@ under the License. 1 - + @@ -411,8 +411,8 @@ under the License. 1 - - + + createLoadBalancerRule 920-2 [Create a Load Balancer Rule with the Same Name] @@ -446,7 +446,7 @@ under the License. 1 - + @@ -483,7 +483,7 @@ under the License. 1 - + createAccount @@ -543,16 +543,16 @@ under the License. account accountvar - + id userid - + - +--> registerUserKeys [Register User Keys] @@ -615,8 +615,8 @@ under the License. secretkey - - + + createNetwork [Creating default network] @@ -653,7 +653,7 @@ under the License. - + deployVirtualMachine [Deploy Virtual Machine in that Account] @@ -670,7 +670,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -686,7 +686,7 @@ under the License. account accountvar - + domainid 1 @@ -694,7 +694,7 @@ under the License. networkids networkid - + @@ -703,7 +703,7 @@ under the License. - + associateIpAddress [Acquire a Non-Source NAT IP Address] @@ -722,14 +722,14 @@ under the License. 1 - + ipaddress nonsourcenatpublicip1 - + createLoadBalancerRule [Creating a LB Rule with the same name of a rule in another account] @@ -765,7 +765,7 @@ under the License. 1 - + @@ -803,8 +803,8 @@ under the License. - - + + createLoadBalancerRule @@ -840,8 +840,8 @@ under the License. admin - - + + associateIpAddress @@ -860,14 +860,14 @@ under the License. 1 - + ipaddress nonsourcenatpublicip - + createLoadBalancerRule 924 [Create Load Balancer Rule from non-Source NAT IP Address] @@ -900,8 +900,8 @@ under the License. 1 - - + + deleteLoadBalancerRule @@ -914,7 +914,7 @@ under the License. 10000 - + @@ -926,7 +926,7 @@ under the License. accountid - + createAccount @@ -987,16 +987,16 @@ under the License. account accountvar - + id userid - + - +--> registerUserKeys [Register User Keys] @@ -1059,8 +1059,8 @@ under the License. 
secretkey - - + + createNetwork [Creating default network] @@ -1097,7 +1097,7 @@ under the License. - + deployVirtualMachine [Deploy Virtual Machine in the account created] @@ -1114,7 +1114,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -1137,7 +1137,7 @@ under the License. - + associateIpAddress [Acquire a non-Source NAT IP Address for that account] @@ -1156,14 +1156,14 @@ under the License. 1 - + ipaddress nonsourcenatpublicip1 - + createLoadBalancerRule [Create Load Balancer Rule with the acquired Ip address] @@ -1204,8 +1204,8 @@ under the License. lbid1 - - + + createLoadBalancerRule [Create a Second Load Balancer Rule with the Acquired Ip Address] @@ -1238,7 +1238,7 @@ under the License. domainid 1 - + @@ -1246,8 +1246,8 @@ under the License. lbid2 - - + + createLoadBalancerRule [Create a Third Load Balancer Rule with the Acquired Ip Address] @@ -1288,8 +1288,8 @@ under the License. lbid3 - - + + deleteLoadBalancerRule 926 [Admin allowed to Delete Load Balancer Rule of another account via 8096 port] @@ -1312,7 +1312,7 @@ under the License. accountid - + createAccount @@ -1373,16 +1373,16 @@ under the License. account accountvar - + id userid - + - +--> registerUserKeys [Register User Keys] @@ -1445,8 +1445,8 @@ under the License. secretkey - - + + deleteLoadBalancerRule [Admin allowed to Delete Load Balancer Rule of another account via 8080 port] @@ -1459,8 +1459,8 @@ under the License. - - + + deleteAccount @@ -1471,8 +1471,8 @@ under the License. accountid - - + + createAccount [Create the Account and a User for the account] @@ -1486,7 +1486,7 @@ under the License. 0 - domainid + domainid 1 @@ -1532,16 +1532,16 @@ under the License. account accountvar - + id userid - + - +--> registerUserKeys [Register User Keys] @@ -1609,7 +1609,7 @@ under the License. - + createNetwork [Creating default network] @@ -1646,7 +1646,7 @@ under the License. - + deleteLoadBalancerRule true @@ -1659,7 +1659,7 @@ under the License. 
- + deployVirtualMachine @@ -1676,7 +1676,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -1709,7 +1709,7 @@ under the License. - + deployVirtualMachine [Deploy a Virtual Machine-2 to check multiple VMs - LB assignment] @@ -1725,7 +1725,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -1758,7 +1758,7 @@ under the License. - + associateIpAddress [Acquire a non-Source NAT IP address] @@ -1776,14 +1776,14 @@ under the License. 1 - + ipaddress nonsourcenatpublicip - + createLoadBalancerRule [Create a Load Balancer Rule] @@ -1822,8 +1822,8 @@ under the License. lbid - - + + assignToLoadBalancerRule 939 [Assign the First VM to the Load Balancing Rule] @@ -1837,8 +1837,8 @@ under the License. vmid1 - - + + assignToLoadBalancerRule 939 [Assign the Second VM to the Load Balancing Rule] @@ -1852,8 +1852,8 @@ under the License. vmid2 - - + + createLoadBalancerRule @@ -1887,8 +1887,8 @@ under the License. 1 - - + + createLoadBalancerRule 978 [Creation of Second LoadBalancer Rule should Fail] @@ -1922,8 +1922,8 @@ under the License. 1 - - + + listRouters @@ -1950,8 +1950,8 @@ under the License. sourcenatpublicip - - + + createLoadBalancerRule 1017 [Create Load Balancer Rule with Source NAT IP Address] @@ -1984,9 +1984,9 @@ under the License. 1 - - - + + + destroyVirtualMachine @@ -1997,8 +1997,8 @@ under the License. vmid1 - - + + destroyVirtualMachine [Destroy vm as a part of cleanup] @@ -2009,7 +2009,7 @@ under the License. - + destroyVirtualMachine [Destroy vm as a part of cleanup] @@ -2020,7 +2020,7 @@ under the License. - + deleteAccount [Deleting Account as a part of cleanup] @@ -2030,7 +2030,7 @@ under the License. 
accountid - - - + + + diff --git a/test/metadata/func/localstorage_volume_test.xml b/test/metadata/func/localstorage_volume_test.xml index f268495b171..aa1bbcb4292 100644 --- a/test/metadata/func/localstorage_volume_test.xml +++ b/test/metadata/func/localstorage_volume_test.xml @@ -48,7 +48,7 @@ under the License. - + createVolume @@ -73,11 +73,11 @@ under the License. diskofferingid globaldiskofferingid - + - - + + listVolumes @@ -98,7 +98,7 @@ under the License. - + detachVolume 1032 @@ -110,9 +110,9 @@ under the License. - - - + + + listVolumes @@ -133,7 +133,7 @@ under the License. - + detachVolume 1034 @@ -145,9 +145,9 @@ under the License. - - - - - + + + + + diff --git a/test/metadata/func/mgmtvmsync.xml b/test/metadata/func/mgmtvmsync.xml index 9972b5555dd..f2b884e8a21 100644 --- a/test/metadata/func/mgmtvmsync.xml +++ b/test/metadata/func/mgmtvmsync.xml @@ -22,12 +22,12 @@ under the License. deployVirtualMachine deploy First VM test case - + diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -39,7 +39,7 @@ under the License. zoneid globalzoneid - + displayname FirstVM @@ -80,12 +80,12 @@ under the License. deployVirtualMachine deploy Second VM test case - + diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -97,7 +97,7 @@ under the License. zoneid globalzoneid - + displayname SecondVM @@ -172,7 +172,7 @@ under the License. haenable false - + @@ -194,7 +194,7 @@ under the License. - + @@ -231,8 +231,8 @@ under the License. instance_name_1st - - + + select instance_name from vm_instance true @@ -269,7 +269,7 @@ under the License. - + @@ -277,13 +277,13 @@ under the License. killvm.sh Kill the First HA disabled VM - + n instance_name_1st - + h ipaddress_1st @@ -295,13 +295,13 @@ under the License. killvm.sh Kill the Second HA disabled VM - + n instance_name_2nd - + h ipaddress_2nd @@ -320,7 +320,7 @@ under the License. 100 - + @@ -352,7 +352,7 @@ under the License. 
100 - + @@ -369,10 +369,10 @@ under the License. state Stopped - + - - + + listVirtualMachines Check the stopped status of Second virtual machine with ha disabled @@ -387,7 +387,7 @@ under the License. state Stopped - + @@ -403,7 +403,7 @@ under the License. - + destroyVirtualMachine Destroy the Second vm as a part of cleanup @@ -421,12 +421,12 @@ under the License. deployVirtualMachine deploy VM test case - + diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -438,7 +438,7 @@ under the License. zoneid globalzoneid - + displayname FirstVM @@ -493,7 +493,7 @@ under the License. - + @@ -513,7 +513,7 @@ under the License. - + ms.sh @@ -537,13 +537,13 @@ under the License. shutdown.sh Shut down the VM Test Case - + n instance_name_1st - + h ipaddress_1st @@ -562,7 +562,7 @@ under the License. 60 - + @@ -593,7 +593,7 @@ under the License. 60 - + @@ -613,7 +613,7 @@ under the License. - + listHosts List host that is presumed to have the VM Test Case @@ -630,7 +630,7 @@ under the License. - + listvm.sh @@ -646,6 +646,6 @@ under the License. instance_name_1st - + diff --git a/test/metadata/func/portforwarding.xml b/test/metadata/func/portforwarding.xml index e591c0fdeeb..2c077a60cb5 100644 --- a/test/metadata/func/portforwarding.xml +++ b/test/metadata/func/portforwarding.xml @@ -78,12 +78,12 @@ under the License. account accountname - + id userid - + @@ -166,7 +166,7 @@ under the License. ---> +--> deployVirtualMachine [Deploy Virtual Machine in the created Account] @@ -202,7 +202,7 @@ under the License. networkids networkid - + @@ -237,7 +237,7 @@ under the License. 1 - + id routerid @@ -247,8 +247,8 @@ under the License. sourcenatpublicip - - + + associateIpAddress [Acquire a new IP Address for the Account] @@ -266,14 +266,14 @@ under the License. 1 - + ipaddress nonsourcenatpublicip - + createPortForwardingRule @@ -313,7 +313,7 @@ under the License. - + createPortForwardingRule @@ -353,7 +353,7 @@ under the License. 
- + createPortForwardingRule @@ -393,7 +393,7 @@ under the License. - + createPortForwardingRule @@ -433,9 +433,9 @@ under the License. - + - +--> createPortForwardingRule @@ -514,7 +514,7 @@ under the License. - + createPortForwardingRule 328 [Create Port Forwarding Rule with same parameters with UDP protocol] @@ -553,7 +553,7 @@ under the License. - + associateIpAddress @@ -572,14 +572,14 @@ under the License. 1 - + ipaddress nonsourcenatpublicip1 - + createPortForwardingRule [Create Port Forwarding Rule with the acquired IP Address] @@ -617,14 +617,14 @@ under the License. 1 - + id portforwardingruleid1 - + deletePortForwardingRule 329 [Delete Port Forwarding Rule] @@ -643,7 +643,7 @@ under the License. - + listPortForwardingRules 329 [Try to list the Deleted Port Forwarding Rules (del pf rule exercise end)] @@ -696,14 +696,14 @@ under the License. 1 - + id portforwardingruleid1 - + disassociateIpAddress [Release the Non-Source NAT IP Address] @@ -722,7 +722,7 @@ under the License. - + listPortForwardingRules 331 [Try to List the Port Forwarding Rules created from Released IP Address(del ip exercise end)] @@ -736,13 +736,13 @@ under the License. - + createLoadBalancerRule [Create Load Balancing Rule Inorder to test PF-LBrule collision (pf-lb collision exercise begin)] - + privateip vmipaddress @@ -781,13 +781,13 @@ under the License. - - + + assignToLoadBalancerRule [Assign the Created LB Rule to the VM created in the Test] - + virtualmachineids vmid @@ -797,14 +797,14 @@ under the License. - + createPortForwardingRule [Create Port Forwarding Rule to test PF-LBrule collision (pf-lb rule collison exercise end)] 332 true - + privateip vmipaddress @@ -838,7 +838,7 @@ under the License. - + stopRouter @@ -850,12 +850,12 @@ under the License. - + createPortForwardingRule 336 [Create Port Forwarding Rule with the Source NAT IP Address] - + privateip vmipaddress @@ -889,7 +889,7 @@ under the License. 
- + startRouter [Start the DomR (add new pfrule when domR is stopped exercise end)] @@ -900,14 +900,14 @@ under the License. - + createPortForwardingRule 337 [Create Port Forwarding Rule with the Source NAT IP Address] - + privateip vmipaddress @@ -940,16 +940,16 @@ under the License. 1 - + id portforwardingruleid_forremove - - + stopRouter @@ -1041,7 +1041,7 @@ under the License. - + startRouter [Start the DomR (del pf rule exercise end)] @@ -1063,6 +1063,6 @@ under the License. accountid - + diff --git a/test/metadata/func/private_templates.xml b/test/metadata/func/private_templates.xml index d733c7e7f50..c56eec4a5dc 100644 --- a/test/metadata/func/private_templates.xml +++ b/test/metadata/func/private_templates.xml @@ -77,12 +77,12 @@ under the License. account accountvar - + id userid - + @@ -124,7 +124,7 @@ under the License. ---> +--> registerUserKeys @@ -143,8 +143,8 @@ under the License. secretkey - - + + createNetwork [Creating default network] @@ -181,7 +181,7 @@ under the License. - + deployVirtualMachine true @@ -197,7 +197,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -230,8 +230,8 @@ under the License. - - + + listVolumes @@ -251,8 +251,8 @@ under the License. - - + + stopVirtualMachine @@ -264,7 +264,7 @@ under the License. - + createTemplate true @@ -294,8 +294,8 @@ under the License. - - + + startVirtualMachine @@ -307,7 +307,7 @@ under the License. - + createTemplate true @@ -331,7 +331,7 @@ under the License. - + destroyVirtualMachine @@ -343,7 +343,7 @@ under the License. - + createTemplate true @@ -367,7 +367,7 @@ under the License. - + deployVirtualMachine @@ -385,7 +385,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid privatetemplateid @@ -422,7 +422,7 @@ under the License. - + listVolumes @@ -443,7 +443,7 @@ under the License. - + stopVirtualMachine true @@ -454,7 +454,7 @@ under the License. - + createTemplate true @@ -483,7 +483,7 @@ under the License. 
- + deployVirtualMachine true @@ -500,7 +500,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid privatetemplateid @@ -537,7 +537,7 @@ under the License. - + createTemplate @@ -567,7 +567,7 @@ under the License. - + createTemplate true @@ -591,8 +591,8 @@ under the License. - - + + @@ -654,12 +654,12 @@ under the License. account accountvar1 - + id userid1 - + @@ -697,7 +697,7 @@ under the License. ---> +--> registerUserKeys @@ -716,8 +716,8 @@ under the License. secretkey - - + + createNetwork [Creating default network] @@ -754,7 +754,7 @@ under the License. - + deployVirtualMachine true @@ -770,7 +770,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -803,7 +803,7 @@ under the License. - + stopVirtualMachine true @@ -814,7 +814,7 @@ under the License. - + listVolumes @@ -834,7 +834,7 @@ under the License. - + createTemplate true @@ -858,9 +858,9 @@ under the License. - - - + + + deleteAccount Deleting First Account as a part of cleanup @@ -871,7 +871,7 @@ under the License. - + deleteAccount Deleting Second Account as a part of cleanup @@ -882,8 +882,8 @@ under the License. - - - + diff --git a/test/metadata/func/regression.xml b/test/metadata/func/regression.xml index 2b47dadcfcf..eebf13ff74c 100644 --- a/test/metadata/func/regression.xml +++ b/test/metadata/func/regression.xml @@ -170,7 +170,7 @@ under the License. accountname - + id @@ -215,12 +215,12 @@ under the License. domain ROOT - + - - - + + + createUser @@ -257,9 +257,9 @@ under the License. domainid 1 - + - + id @@ -318,7 +318,7 @@ under the License. accountname - + id @@ -465,7 +465,7 @@ under the License. usersecretkey - + id @@ -596,11 +596,11 @@ under the License. - + - @@ -1480,7 +1480,7 @@ under the License. true - + success @@ -1505,7 +1505,7 @@ under the License. 1 - + template 0 @@ -1563,7 +1563,7 @@ under the License. size - debian50templatesize + debian50templatesize account @@ -1596,7 +1596,7 @@ under the License. 
domainid 1 - + @@ -1804,7 +1804,7 @@ under the License. offerha false - + @@ -1840,7 +1840,7 @@ under the License. vlan - guestcidraddress + guestcidraddress networktype @@ -1886,7 +1886,7 @@ under the License. - + deployVirtualMachine @@ -1903,7 +1903,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -1995,8 +1995,8 @@ under the License. - - + + listVirtualMachines @@ -2077,8 +2077,8 @@ under the License. group1 - - + + listRouters @@ -2169,9 +2169,9 @@ under the License. Running - - - + + + associateIpAddress 546 @@ -2189,7 +2189,7 @@ under the License. 1 - + id nonsourcenatpublicipid @@ -2215,9 +2215,9 @@ under the License. - + - + createLoadBalancerRule @@ -2286,9 +2286,9 @@ under the License. accountname - - - + + + listLoadBalancerRules @@ -2305,8 +2305,8 @@ under the License. - - + + assignToLoadBalancerRule @@ -2321,8 +2321,8 @@ under the License. vmid - - + + deleteLoadBalancerRule @@ -2333,17 +2333,17 @@ under the License. lbid - - + + listLoadBalancerRules true - + createLoadBalancerRule - + name lbname @@ -2406,8 +2406,8 @@ under the License. accountname - - + + assignToLoadBalancerRule @@ -2420,9 +2420,9 @@ under the License. vmid - - - + + + listLoadBalancerRuleInstances @@ -2434,8 +2434,8 @@ under the License. lbid - - + + removeFromLoadBalancerRule @@ -2454,8 +2454,8 @@ under the License. accountname - - + + listLoadBalancerRuleInstances true @@ -2465,13 +2465,13 @@ under the License. lbid - - + + - + - - + + rebootVirtualMachine @@ -2483,7 +2483,7 @@ under the License. - + listVirtualMachines @@ -2503,8 +2503,8 @@ under the License. - - + + stopVirtualMachine @@ -2516,7 +2516,7 @@ under the License. - + listVirtualMachines @@ -2536,9 +2536,9 @@ under the License. - - - + + + @@ -2560,7 +2560,7 @@ under the License. - + listVolumes @@ -2580,7 +2580,7 @@ under the License. - + detachVolume @@ -2591,7 +2591,7 @@ under the License. - + listVolumes true @@ -2606,8 +2606,8 @@ under the License. 
- - + + createVolume @@ -2631,7 +2631,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -2639,9 +2639,9 @@ under the License. datavolumeid - - - + + + attachVolume @@ -2656,7 +2656,7 @@ under the License. - + listVolumes false @@ -2671,8 +2671,8 @@ under the License. - - + + createVolume @@ -2695,7 +2695,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -2703,8 +2703,8 @@ under the License. datavolumeid1 - - + + deleteVolume @@ -2713,8 +2713,8 @@ under the License. datavolumeid1 - - + + listVolumes true @@ -2724,10 +2724,10 @@ under the License. datavolumeid1 - - + - + + createSnapshot @@ -2758,7 +2758,7 @@ under the License. - + listSnapshots false @@ -2769,7 +2769,7 @@ under the License. - + deleteSnapshot @@ -2781,7 +2781,7 @@ under the License. - + listSnapshots @@ -2794,7 +2794,7 @@ under the License. - + createSnapshot @@ -2811,7 +2811,7 @@ under the License. - + createTemplate 958 @@ -2859,7 +2859,7 @@ under the License. - + listTemplates @@ -2900,7 +2900,7 @@ under the License. - + updateTemplate @@ -2920,7 +2920,7 @@ under the License. - + listTemplates List updated template @@ -2941,8 +2941,8 @@ under the License. - - + + listTemplatePermissions @@ -2964,8 +2964,8 @@ under the License. - - + + updateTemplatePermissions @@ -2981,7 +2981,7 @@ under the License. - + listTemplates List template with updated permission @@ -3002,8 +3002,8 @@ under the License. - - + + deleteTemplate @@ -3015,7 +3015,7 @@ under the License. - + listTemplates true @@ -3030,9 +3030,9 @@ under the License. - - + + changeServiceForVirtualMachine @@ -3048,7 +3048,7 @@ under the License. - + listVirtualMachines @@ -3072,8 +3072,8 @@ under the License. - - + + resetPasswordForVirtualMachine @@ -3094,7 +3094,7 @@ under the License. - + listVirtualMachines @@ -3114,7 +3114,7 @@ under the License. - + listVirtualMachines @@ -3135,7 +3135,7 @@ under the License. - + startVirtualMachine 18 @@ -3191,7 +3191,7 @@ under the License. 
- + listVirtualMachines @@ -3211,7 +3211,7 @@ under the License. - + updateVirtualMachine @@ -3226,7 +3226,7 @@ under the License. - + listVirtualMachines 21 @@ -3247,7 +3247,7 @@ under the License. - + updateVirtualMachine @@ -3261,7 +3261,7 @@ under the License. - + listVirtualMachines 21 @@ -3282,9 +3282,9 @@ under the License. - - - + + + listPublicIpAddresses @@ -3298,8 +3298,8 @@ under the License. - - + + disassociateIpAddress @@ -3315,7 +3315,7 @@ under the License. - + rebootRouter @@ -3327,7 +3327,7 @@ under the License. - + stopRouter @@ -3339,7 +3339,7 @@ under the License. - + startRouter @@ -3414,7 +3414,7 @@ under the License. - + listEvents @@ -3426,7 +3426,7 @@ under the License. - + destroyVirtualMachine @@ -3438,7 +3438,7 @@ under the License. - + listVirtualMachines @@ -3454,7 +3454,7 @@ under the License. - + recoverVirtualMachine @@ -3466,7 +3466,7 @@ under the License. - + listVirtualMachines @@ -3486,14 +3486,14 @@ under the License. - + listConfigurations 549 false - - + + listConfigurations @@ -3506,8 +3506,8 @@ under the License. - - + + updateConfiguration @@ -3523,7 +3523,7 @@ under the License. - + listConfigurations @@ -3538,9 +3538,9 @@ under the License. 35 - - - + + + listPods @@ -3549,9 +3549,9 @@ under the License. - - - + + + createAccount @@ -3591,7 +3591,7 @@ under the License. - + listAccounts @@ -3606,8 +3606,8 @@ under the License. accountid1 - - + + updateAccount 951 @@ -3626,7 +3626,7 @@ under the License. - + listAccounts @@ -3645,8 +3645,8 @@ under the License. updatedname - - + + disableAccount @@ -3664,10 +3664,10 @@ under the License. lock true - + - - + + @@ -3683,10 +3683,10 @@ under the License. 1 - - - - + + + + updateResourceLimit 953 @@ -3722,9 +3722,9 @@ under the License. 300 - - - + + + listResourceLimits true @@ -3745,9 +3745,9 @@ under the License. 300 - - - + + + updateResourceLimit 954 @@ -3765,8 +3765,8 @@ under the License. 65 - - + + listResourceLimits @@ -3785,9 +3785,9 @@ under the License. 
65 - - - + + + createDiskOffering @@ -3833,9 +3833,9 @@ under the License. - - - + + + listDiskOfferings @@ -3851,9 +3851,9 @@ under the License. newdisk - - - + + + updateDiskOffering @@ -3867,8 +3867,8 @@ under the License. newdiskname - - + + listDiskOfferings @@ -3887,9 +3887,9 @@ under the License. newdiskname - + + - deleteDiskOffering @@ -3900,8 +3900,8 @@ under the License. diskid - - + + listDiskOfferings true @@ -3915,7 +3915,7 @@ under the License. diskid - + @@ -3923,29 +3923,29 @@ under the License. 750 false - + listCapacity 974 false - + listHosts false 975 - - + + listIsos 960 true - + createServiceOffering @@ -3978,7 +3978,7 @@ under the License. - + listServiceOfferings @@ -4013,8 +4013,8 @@ under the License. - - + + updateServiceOffering @@ -4033,8 +4033,8 @@ under the License. - - + + listServiceOfferings @@ -4069,8 +4069,8 @@ under the License. - - + + deleteServiceOffering @@ -4082,7 +4082,7 @@ under the License. - + listServiceOfferings 963-2 @@ -4094,9 +4094,9 @@ under the License. - - - + + + registerIso Register iso @@ -4164,8 +4164,8 @@ under the License. - - + + listIsos List isos @@ -4177,7 +4177,7 @@ under the License. - + registerTemplate @@ -4258,8 +4258,8 @@ under the License. - - + + listSystemVms @@ -4337,7 +4337,7 @@ under the License. - + rebootSystemVm @@ -4349,7 +4349,7 @@ under the License. - + stopSystemVm @@ -4360,8 +4360,8 @@ under the License. consoleid - - + + startSystemVm @@ -4372,9 +4372,9 @@ under the License. consoleid - - - + + + deleteIso @@ -4398,7 +4398,7 @@ under the License. - + deleteTemplate @@ -4437,7 +4437,7 @@ under the License. 
accountid - - - + + + diff --git a/test/metadata/func/regression_events.properties b/test/metadata/func/regression_events.properties index 331536370d4..800248fb99f 100644 --- a/test/metadata/func/regression_events.properties +++ b/test/metadata/func/regression_events.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/test/metadata/func/regression_new.xml b/test/metadata/func/regression_new.xml index 8db3593c0cd..3554e6e45a3 100644 --- a/test/metadata/func/regression_new.xml +++ b/test/metadata/func/regression_new.xml @@ -836,7 +836,7 @@ under the License. size - debian50templatesize + debian50templatesize account @@ -869,7 +869,7 @@ under the License. domainid 1 - + @@ -1269,7 +1269,7 @@ under the License. - + sleep.sh @@ -1280,7 +1280,7 @@ under the License. 60 - + @@ -2686,7 +2686,7 @@ under the License. serviceofferingid - globalserviceofferingid + globalserviceofferingid serviceofferingname @@ -2694,7 +2694,7 @@ under the License. serviceofferingdisplaytext - + @@ -2807,7 +2807,7 @@ under the License. destroyed false - + @@ -2902,7 +2902,7 @@ under the License. state Created - + account accountname @@ -3018,7 +3018,7 @@ under the License. state Created - + account accountname @@ -3066,7 +3066,7 @@ under the License. destroyed false - + @@ -3210,7 +3210,7 @@ under the License. intervaltype MANUAL - + @@ -3320,7 +3320,7 @@ under the License. - + createTemplate 958 [Create Template from Snapshot] @@ -3373,7 +3373,7 @@ under the License. isready true - + passwordenabled false @@ -3411,7 +3411,7 @@ under the License. domainid 1 - + @@ -3458,7 +3458,7 @@ under the License. 
hypervisor XenServer - + @@ -3539,11 +3539,11 @@ under the License. isextractable true - + - - + + sleep.sh @@ -3554,8 +3554,8 @@ under the License. 300 - - + + listTemplates @@ -3601,7 +3601,7 @@ under the License. format VHD - + isfeatured false @@ -3648,7 +3648,7 @@ under the License. domain ROOT - + @@ -3762,7 +3762,7 @@ under the License. 240 - + listTemplates @@ -3965,7 +3965,7 @@ under the License. - + - @@ -7355,7 +7355,7 @@ under the License. state Running - + @@ -7444,7 +7444,7 @@ under the License. state Running - + @@ -7466,7 +7466,7 @@ under the License. systemvmtype consoleproxy - + zoneid globalzoneid @@ -7524,7 +7524,7 @@ under the License. state Stopped - + @@ -7612,7 +7612,7 @@ under the License. state Running - + @@ -7638,7 +7638,7 @@ under the License. privateisoid - + diff --git a/test/metadata/func/regression_test.xml b/test/metadata/func/regression_test.xml index 18dc7856d9c..53baea191b2 100644 --- a/test/metadata/func/regression_test.xml +++ b/test/metadata/func/regression_test.xml @@ -93,7 +93,7 @@ under the License. - + listAccounts @@ -200,8 +200,8 @@ under the License. false - - + + updateUser @@ -216,8 +216,8 @@ under the License. newuser - - + + listUsers @@ -271,10 +271,10 @@ under the License. created - + - - + + lockUser @@ -285,8 +285,8 @@ under the License. userid - - + + disableUser @@ -298,9 +298,9 @@ under the License. - - + + enableUser @@ -311,8 +311,8 @@ under the License. userid - - + + createDomain @@ -333,9 +333,9 @@ under the License. newdomainid - + + - listDomains @@ -356,8 +356,8 @@ under the License. domainname - - + + updateDomain @@ -372,8 +372,8 @@ under the License. newdomainname - - + + listDomains false @@ -383,8 +383,8 @@ under the License. newdomainname - - + + deleteDomain @@ -395,8 +395,8 @@ under the License. newdomainid - - + + listTemplates @@ -440,7 +440,7 @@ under the License. - + listTemplates @@ -484,7 +484,7 @@ under the License. - + listServiceOfferings @@ -508,7 +508,7 @@ under the License. 
- + listServiceOfferings 876 @@ -552,13 +552,13 @@ under the License. - - + + listZones 39 - + 0 zone @@ -571,7 +571,7 @@ under the License. - + deployVirtualMachine @@ -588,7 +588,7 @@ under the License. diskofferingid 5 - + templateid globaltemplateid @@ -676,8 +676,8 @@ under the License. - - + + listVirtualMachines @@ -758,8 +758,8 @@ under the License. group1 - - + + listRouters @@ -851,9 +851,9 @@ under the License. Running - - - + + + +--> - + + + +--> + + --> + + --> - + + --> + + + --> + + --> - + + + --> + + + --> + + --> - + + --> + + + --> - + + --> + + + --> + + --> + + --> - + + --> + listVirtualMachines @@ -1516,8 +1516,8 @@ under the License. - - + + rebootVirtualMachine @@ -1529,7 +1529,7 @@ under the License. - + listVirtualMachines @@ -1549,8 +1549,8 @@ under the License. - - + + stopVirtualMachine @@ -1562,7 +1562,7 @@ under the License. - + listVirtualMachines @@ -1582,9 +1582,9 @@ under the License. - - - + + + @@ -1606,7 +1606,7 @@ under the License. - + listVolumes @@ -1626,7 +1626,7 @@ under the License. - + detachVolume @@ -1637,7 +1637,7 @@ under the License. - + listVolumes true @@ -1652,8 +1652,8 @@ under the License. - - + + createVolume @@ -1685,9 +1685,9 @@ under the License. datavolumeid - - - + + + attachVolume @@ -1702,7 +1702,7 @@ under the License. - + listVolumes false @@ -1717,8 +1717,8 @@ under the License. - - + + createVolume @@ -1749,8 +1749,8 @@ under the License. datavolumeid1 - - + + deleteVolume @@ -1759,8 +1759,8 @@ under the License. datavolumeid1 - - + + listVolumes true @@ -1770,10 +1770,10 @@ under the License. datavolumeid1 - - + - + + createSnapshot @@ -1804,7 +1804,7 @@ under the License. - + listSnapshots false @@ -1815,7 +1815,7 @@ under the License. - + deleteSnapshot @@ -1827,7 +1827,7 @@ under the License. - + listSnapshots @@ -1840,7 +1840,7 @@ under the License. - + createTemplate @@ -1889,7 +1889,7 @@ under the License. - + listTemplates @@ -1930,7 +1930,7 @@ under the License. 
- + updateTemplate @@ -1950,7 +1950,7 @@ under the License. - + listTemplates List updated template @@ -1971,8 +1971,8 @@ under the License. - - + + listTemplatePermissions @@ -1994,8 +1994,8 @@ under the License. - - + + updateTemplatePermissions @@ -2011,7 +2011,7 @@ under the License. - + listTemplates List template with updated permission @@ -2032,8 +2032,8 @@ under the License. - - + + deleteTemplate @@ -2045,7 +2045,7 @@ under the License. - + listTemplates true @@ -2060,9 +2060,9 @@ under the License. - - + + changeServiceForVirtualMachine @@ -2078,7 +2078,7 @@ under the License. - + listVirtualMachines @@ -2102,8 +2102,8 @@ under the License. - - + + +--> listVirtualMachines @@ -2165,7 +2165,7 @@ under the License. - + startVirtualMachine 18 @@ -2221,7 +2221,7 @@ under the License. - + listVirtualMachines @@ -2241,7 +2241,7 @@ under the License. - + updateVirtualMachine @@ -2256,7 +2256,7 @@ under the License. - + listVirtualMachines 21 @@ -2277,7 +2277,7 @@ under the License. - + updateVirtualMachine @@ -2291,7 +2291,7 @@ under the License. - + listVirtualMachines 21 @@ -2312,9 +2312,9 @@ under the License. - - - + + + - + --> + + --> rebootRouter @@ -2357,7 +2357,7 @@ under the License. - + stopRouter @@ -2369,7 +2369,7 @@ under the License. - + startRouter @@ -2445,7 +2445,7 @@ under the License. - + listEvents @@ -2457,7 +2457,7 @@ under the License. - + destroyVirtualMachine @@ -2469,7 +2469,7 @@ under the License. - + listVirtualMachines @@ -2485,7 +2485,7 @@ under the License. - + recoverVirtualMachine @@ -2497,7 +2497,7 @@ under the License. - + listVirtualMachines @@ -2517,14 +2517,14 @@ under the License. - + listConfigurations 549 false - - + + listConfigurations @@ -2537,8 +2537,8 @@ under the License. - - + + updateConfiguration @@ -2554,7 +2554,7 @@ under the License. - + listConfigurations @@ -2569,9 +2569,9 @@ under the License. 35 - - - + + + listPods @@ -2580,9 +2580,9 @@ under the License. 
- - - + + + createUser @@ -2619,7 +2619,7 @@ under the License. - + listAccounts @@ -2634,8 +2634,8 @@ under the License. accountid1 - - + + updateAccount 951 @@ -2654,7 +2654,7 @@ under the License. - + listAccounts @@ -2673,8 +2673,8 @@ under the License. updatedname - - + + lockAccount @@ -2689,8 +2689,8 @@ under the License. 1 - - + + disableAccount @@ -2705,8 +2705,8 @@ under the License. 1 - - + + @@ -2722,10 +2722,10 @@ under the License. 1 - - - - + + + + updateResourceLimit 953 @@ -2761,9 +2761,9 @@ under the License. 300 - - - + + + listResourceLimits true @@ -2784,9 +2784,9 @@ under the License. 300 - - - + + + updateResourceLimit 954 @@ -2804,8 +2804,8 @@ under the License. 65 - - + + listResourceLimits @@ -2824,9 +2824,9 @@ under the License. 65 - - - + + + createDiskOffering @@ -2876,9 +2876,9 @@ under the License. - - - + + + listDiskOfferings @@ -2894,9 +2894,9 @@ under the License. newdisk - - - + + + updateDiskOffering @@ -2910,8 +2910,8 @@ under the License. newdiskname - - + + listDiskOfferings @@ -2930,9 +2930,9 @@ under the License. newdiskname - + + - deleteDiskOffering @@ -2943,8 +2943,8 @@ under the License. diskid - - + + listDiskOfferings true @@ -2958,9 +2958,9 @@ under the License. diskid - + + - deleteUser @@ -2971,37 +2971,37 @@ under the License. userid - - + + listAlerts 750 false - + listCapacity 974 false - + listHosts false 975 - - + + listIsos 960 true - + createServiceOffering @@ -3034,7 +3034,7 @@ under the License. - + listServiceOfferings @@ -3069,8 +3069,8 @@ under the License. - - + + updateServiceOffering @@ -3089,8 +3089,8 @@ under the License. - - + + listServiceOfferings @@ -3125,8 +3125,8 @@ under the License. - - + + deleteServiceOffering @@ -3138,7 +3138,7 @@ under the License. - + listServiceOfferings 963-2 @@ -3150,9 +3150,9 @@ under the License. - - - + + + registerIso Register iso @@ -3220,8 +3220,8 @@ under the License. - - + + listIsos List isos @@ -3233,7 +3233,7 @@ under the License. 
- + registerTemplate @@ -3310,8 +3310,8 @@ under the License. - - + + listSystemVms @@ -3389,7 +3389,7 @@ under the License. - + rebootSystemVm @@ -3401,7 +3401,7 @@ under the License. - + stopSystemVm @@ -3412,8 +3412,8 @@ under the License. consoleid - - + + startSystemVm @@ -3424,9 +3424,9 @@ under the License. consoleid - - - + + + deleteIso @@ -3450,7 +3450,7 @@ under the License. - + deleteTemplate @@ -3479,5 +3479,5 @@ under the License. - + diff --git a/test/metadata/func/regression_user.xml b/test/metadata/func/regression_user.xml index aac4d585f07..c58c848e9d5 100644 --- a/test/metadata/func/regression_user.xml +++ b/test/metadata/func/regression_user.xml @@ -170,7 +170,7 @@ under the License. accountname - + id @@ -215,7 +215,7 @@ under the License. domain ROOT - + @@ -295,7 +295,7 @@ under the License. ---> +--> registerUserKeys @@ -314,8 +314,8 @@ under the License. secretkey - - + + createNetwork [Creating default network] @@ -352,7 +352,7 @@ under the License. - + listAccounts @@ -446,9 +446,9 @@ under the License. templateavailable - + + - deployVirtualMachine @@ -466,7 +466,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -490,7 +490,7 @@ under the License. networkids networkid - + @@ -542,8 +542,8 @@ under the License. - - + + listVirtualMachines @@ -597,8 +597,8 @@ under the License. group1 - - + + listRouters @@ -690,9 +690,9 @@ under the License. Running - - - + + + associateIpAddress true @@ -703,7 +703,7 @@ under the License. globalzoneid - + id nonsourcenatpublicipid @@ -729,9 +729,9 @@ under the License. - + - + createLoadBalancerRule @@ -797,9 +797,9 @@ under the License. accountname - - - + + + listLoadBalancerRules @@ -807,8 +807,8 @@ under the License. 988 false - - + + assignToLoadBalancerRule @@ -824,8 +824,8 @@ under the License. vmid - - + + deleteLoadBalancerRule @@ -837,19 +837,19 @@ under the License. 
lbid - - + + listLoadBalancerRules true true - + createLoadBalancerRule true - + name lbname @@ -912,8 +912,8 @@ under the License. accountname - - + + assignToLoadBalancerRule true @@ -927,9 +927,9 @@ under the License. vmid - - - + + + listLoadBalancerRuleInstances @@ -942,8 +942,8 @@ under the License. lbid - - + + removeFromLoadBalancerRule @@ -963,8 +963,8 @@ under the License. accountname - - + + listLoadBalancerRuleInstances true @@ -975,13 +975,13 @@ under the License. lbid - - + + - + + +--> + + +--> + +--> - + +--> + + +--> - + +--> + + +--> + +--> + +--> - + +--> + listVirtualMachines @@ -1322,8 +1322,8 @@ under the License. - - + + rebootVirtualMachine @@ -1336,7 +1336,7 @@ under the License. - + listVirtualMachines true @@ -1353,8 +1353,8 @@ under the License. - - + + stopVirtualMachine @@ -1367,7 +1367,7 @@ under the License. - + listVirtualMachines true @@ -1384,9 +1384,9 @@ under the License. - - - + + + @@ -1408,7 +1408,7 @@ under the License. - + listVolumes @@ -1428,7 +1428,7 @@ under the License. - + detachVolume @@ -1440,7 +1440,7 @@ under the License. - + listVolumes true @@ -1455,8 +1455,8 @@ under the License. - - + + createVolume @@ -1473,7 +1473,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -1481,9 +1481,9 @@ under the License. datavolumeid - - - + + + attachVolume @@ -1499,7 +1499,7 @@ under the License. - + listVolumes true @@ -1515,8 +1515,8 @@ under the License. - - + + createVolume @@ -1532,7 +1532,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -1540,8 +1540,8 @@ under the License. datavolumeid1 - - + + deleteVolume true @@ -1551,8 +1551,8 @@ under the License. datavolumeid1 - - + + listVolumes true @@ -1562,10 +1562,10 @@ under the License. datavolumeid1 - - + - + + createSnapshot @@ -1597,7 +1597,7 @@ under the License. - + listSnapshots true @@ -1609,7 +1609,7 @@ under the License. - + deleteSnapshot @@ -1622,7 +1622,7 @@ under the License. 
- + listSnapshots @@ -1636,7 +1636,7 @@ under the License. - + createSnapshot @@ -1654,7 +1654,7 @@ under the License. - + createTemplate true @@ -1703,7 +1703,7 @@ under the License. - + listTemplates @@ -1745,7 +1745,7 @@ under the License. - + updateTemplate @@ -1766,7 +1766,7 @@ under the License. - + listTemplates true @@ -1788,8 +1788,8 @@ under the License. - - + + listTemplatePermissions @@ -1808,8 +1808,8 @@ under the License. - - + + updateTemplatePermissions @@ -1826,7 +1826,7 @@ under the License. - + listTemplates true @@ -1848,8 +1848,8 @@ under the License. - - + + deleteTemplate @@ -1862,7 +1862,7 @@ under the License. - + listTemplates true @@ -1878,7 +1878,7 @@ under the License. - + stopVirtualMachine true @@ -1890,7 +1890,7 @@ under the License. - + changeServiceForVirtualMachine @@ -1907,7 +1907,7 @@ under the License. - + listVirtualMachines true @@ -1928,8 +1928,8 @@ under the License. - - + + resetPasswordForVirtualMachine @@ -1951,7 +1951,7 @@ under the License. - + listVirtualMachines true @@ -1968,7 +1968,7 @@ under the License. - + listVirtualMachines @@ -1986,7 +1986,7 @@ under the License. - + startVirtualMachine true @@ -2039,7 +2039,7 @@ under the License. - + listVirtualMachines true @@ -2056,7 +2056,7 @@ under the License. - + updateVirtualMachine @@ -2072,7 +2072,7 @@ under the License. - + listVirtualMachines true @@ -2090,7 +2090,7 @@ under the License. - + updateVirtualMachine true @@ -2105,7 +2105,7 @@ under the License. - + listVirtualMachines true @@ -2123,17 +2123,17 @@ under the License. - - - + + + listPublicIpAddresses true 547 - - + + disassociateIpAddress @@ -2146,15 +2146,15 @@ under the License. - - + + listEvents true 40 - + destroyVirtualMachine @@ -2167,9 +2167,9 @@ under the License. - - - + + + registerIso true @@ -2236,8 +2236,8 @@ under the License. - - + + listIsos true @@ -2250,7 +2250,7 @@ under the License. - + registerTemplate @@ -2320,8 +2320,8 @@ under the License. 
- - + + deleteIso @@ -2347,7 +2347,7 @@ under the License. - + deleteTemplate @@ -2378,5 +2378,5 @@ under the License. - + diff --git a/test/metadata/func/regression_works.xml b/test/metadata/func/regression_works.xml index 731f498ccac..0b2f7543874 100644 --- a/test/metadata/func/regression_works.xml +++ b/test/metadata/func/regression_works.xml @@ -18,10 +18,10 @@ under the License. --> - - + + - + createAccount [Create the Account and a User for the account] @@ -59,7 +59,7 @@ under the License. password - + id @@ -164,11 +164,11 @@ under the License. - + - + listUsers 5 [List the Created User in (3)] @@ -177,7 +177,7 @@ under the License. accountname - + id @@ -222,14 +222,14 @@ under the License. domain ROOT - + - - - + + + - + createUser 3 [Create User Command] @@ -261,9 +261,9 @@ under the License. email nimbus-user@gmail.com - + - + id @@ -312,7 +312,7 @@ under the License. - + @@ -324,7 +324,7 @@ under the License. accountname - + id @@ -429,7 +429,7 @@ under the License. - + @@ -473,7 +473,7 @@ under the License. usersecretkey - + id @@ -534,7 +534,7 @@ under the License. - + @@ -606,11 +606,11 @@ under the License. - + - - + @@ -753,7 +753,7 @@ under the License. - + @@ -825,7 +825,7 @@ under the License. - + @@ -945,7 +945,7 @@ under the License. state locked - + @@ -963,7 +963,7 @@ under the License. domainid 1 - + @@ -1065,7 +1065,7 @@ under the License. state enabled - + @@ -1189,7 +1189,7 @@ under the License. state disabled - + @@ -1207,7 +1207,7 @@ under the License. domainid 1 - + @@ -1309,7 +1309,7 @@ under the License. state enabled - + @@ -1325,7 +1325,7 @@ under the License. domainname - + name @@ -1366,7 +1366,7 @@ under the License. domainname - + id @@ -1414,7 +1414,7 @@ under the License. newdomainname - + id @@ -1456,7 +1456,7 @@ under the License. 1 - + domain 0 @@ -1486,8 +1486,8 @@ under the License. - - + + @@ -1503,7 +1503,7 @@ under the License. true - + success @@ -1528,7 +1528,7 @@ under the License. 
1 - + template 0 @@ -1586,7 +1586,7 @@ under the License. size - debian50templatesize + debian50templatesize account @@ -1619,7 +1619,7 @@ under the License. domainid 1 - + @@ -1829,7 +1829,7 @@ under the License. offerha false - + @@ -1867,7 +1867,7 @@ under the License. vlan - guestcidraddress + guestcidraddress networktype @@ -2015,7 +2015,7 @@ under the License. - + sleep.sh @@ -2026,7 +2026,7 @@ under the License. 60 - + @@ -3432,7 +3432,7 @@ under the License. serviceofferingid - globalserviceofferingid + globalserviceofferingid serviceofferingname @@ -3440,7 +3440,7 @@ under the License. serviceofferingdisplaytext - + @@ -3553,7 +3553,7 @@ under the License. destroyed false - + @@ -3648,7 +3648,7 @@ under the License. state Created - + account accountname @@ -3764,7 +3764,7 @@ under the License. state Created - + account accountname @@ -3812,7 +3812,7 @@ under the License. destroyed false - + @@ -3956,7 +3956,7 @@ under the License. intervaltype MANUAL - + @@ -4066,7 +4066,7 @@ under the License. - + createTemplate 958 [Create Template from Snapshot] @@ -4119,7 +4119,7 @@ under the License. isready true - + passwordenabled false @@ -4157,7 +4157,7 @@ under the License. domainid 1 - + @@ -4204,7 +4204,7 @@ under the License. hypervisor XenServer - + @@ -4285,11 +4285,11 @@ under the License. isextractable true - + - - + + sleep.sh @@ -4300,8 +4300,8 @@ under the License. 300 - - + + listTemplates @@ -4347,7 +4347,7 @@ under the License. format VHD - + isfeatured false @@ -4394,7 +4394,7 @@ under the License. domain ROOT - + @@ -4508,7 +4508,7 @@ under the License. 240 - + listTemplates @@ -4711,7 +4711,7 @@ under the License. - + - @@ -8101,7 +8101,7 @@ under the License. state Running - + @@ -8190,7 +8190,7 @@ under the License. state Running - + @@ -8212,7 +8212,7 @@ under the License. systemvmtype consoleproxy - + zoneid globalzoneid @@ -8270,7 +8270,7 @@ under the License. state Stopped - + @@ -8358,7 +8358,7 @@ under the License. 
state Running - + @@ -8384,7 +8384,7 @@ under the License. privateisoid - + diff --git a/test/metadata/func/resource_limits.xml b/test/metadata/func/resource_limits.xml index e503a03666d..838ec93144a 100644 --- a/test/metadata/func/resource_limits.xml +++ b/test/metadata/func/resource_limits.xml @@ -79,17 +79,17 @@ under the License. account accountname - + id userid - + - - + updateResourceLimit Setup VMs Limit for Account Test Case @@ -166,10 +166,10 @@ under the License. 2 - + - + updateResourceLimit Setup Public-Ips Limit for Account Test Case @@ -200,10 +200,10 @@ under the License. 1 - - + + - + updateResourceLimit Setup Volumes Limit for Account Test Case @@ -234,10 +234,10 @@ under the License. 6 - + - + updateResourceLimit Setup Snapshots Limit for Account Test Case @@ -268,7 +268,7 @@ under the License. 2 - + @@ -302,9 +302,9 @@ under the License. 2 - - - + + + registerUserKeys @@ -325,8 +325,8 @@ under the License. secretkey - - + + createNetwork [Creating default network] @@ -374,7 +374,7 @@ under the License. diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -386,11 +386,11 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 @@ -406,7 +406,7 @@ under the License. networkids networkid - + @@ -424,7 +424,7 @@ under the License. diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -436,15 +436,15 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 - + displayname ResourceLimits-VM-2 @@ -465,38 +465,38 @@ under the License. - + - associateIpAddress + associateIpAddress Associate second IP Test Case true - + zoneid globalzoneid - + account accountname - + domainid 1 - + - + ipaddress ipid - - - + + + @@ -504,7 +504,7 @@ under the License. createVolume Create Fifth Volume Test Case true - + name @@ -513,29 +513,29 @@ under the License. 
zoneid globalzoneid - + account accountname - + domainid 1 - + diskofferingid globaldiskofferingid - + - + id volid1 - - + + attachVolume @@ -550,7 +550,7 @@ under the License. - + createVolume Create Sixth Volume Test Case @@ -563,19 +563,19 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 - + diskofferingid globaldiskofferingid - + @@ -583,7 +583,7 @@ under the License. volid2 - + @@ -599,7 +599,7 @@ under the License. - + createSnapshot @@ -635,7 +635,7 @@ under the License. snpid - + @@ -666,7 +666,7 @@ under the License. zoneid globalzoneid - + account accountname @@ -678,7 +678,7 @@ under the License. hypervisor globalhypervisortype - + @@ -716,7 +716,7 @@ under the License. zoneid globalzoneid - + account accountname @@ -728,7 +728,7 @@ under the License. hypervisor globalhypervisortype - + @@ -749,7 +749,7 @@ under the License. diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -761,11 +761,11 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 @@ -773,7 +773,7 @@ under the License. networkids network - + @@ -782,11 +782,11 @@ under the License. - + - associateIpAddress + associateIpAddress Associate Second IP Failure Test Case true true @@ -794,25 +794,25 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 - + - + ipaddress ipid - - + + @@ -829,29 +829,29 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 - + diskofferingid globaldiskofferingid - + - + id volid3 - - + + listVolumes @@ -873,7 +873,7 @@ under the License. - + createSnapshot Create Third Snapshot Failure Test Case @@ -924,7 +924,7 @@ under the License. zoneid globalzoneid - + account accountname @@ -936,7 +936,7 @@ under the License. hypervisor globalhypervisortype - + @@ -950,14 +950,14 @@ under the License. deleteAccount Clean up account test case - + id accountname - + @@ -1023,15 +1023,15 @@ under the License. 
account accountname - + id userid - + - - + updateResourceLimit Failure in Setting Up VMs Limit for Admin Account Test Case true @@ -1108,10 +1108,10 @@ under the License. 2 - + - + updateResourceLimit Failure in Setting Up Public-Ips Limit for Admin Account Test Case true @@ -1143,10 +1143,10 @@ under the License. 1 - - + + - + updateResourceLimit Failure in Setting Up Volumes Limit for Admin Account Test Case true @@ -1178,10 +1178,10 @@ under the License. 6 - + - + updateResourceLimit Failure in Setting up Snapshots Limit for Admin Account Test Case true @@ -1248,9 +1248,9 @@ under the License. 2 - - - + + + registerUserKeys @@ -1271,7 +1271,7 @@ under the License. secretkey - + createNetwork @@ -1309,7 +1309,7 @@ under the License. - + @@ -1320,7 +1320,7 @@ under the License. diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -1332,11 +1332,11 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 @@ -1352,7 +1352,7 @@ under the License. networkids networkid - + @@ -1370,7 +1370,7 @@ under the License. diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -1382,11 +1382,11 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 @@ -1402,7 +1402,7 @@ under the License. networkids networkid - + @@ -1411,43 +1411,43 @@ under the License. - + - associateIpAddress + associateIpAddress Associate second IP Test Case true - + zoneid globalzoneid - + account accountname - + domainid 1 - + - + ipaddress ipid - - + + createVolume Create Fifth Volume Test Case true - + name @@ -1456,30 +1456,30 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 - + diskofferingid globaldiskofferingid - + - + id volid1 - - - + + + attachVolume @@ -1492,8 +1492,8 @@ under the License. vmid1 - - + + createVolume Create Sixth Volume Test Case @@ -1506,19 +1506,19 @@ under the License. 
zoneid globalzoneid - + account accountname - + domainid 1 - + diskofferingid globaldiskofferingid - + @@ -1526,7 +1526,7 @@ under the License. volid2 - + @@ -1542,7 +1542,7 @@ under the License. - + createSnapshot @@ -1609,7 +1609,7 @@ under the License. zoneid globalzoneid - + account accountname @@ -1621,7 +1621,7 @@ under the License. hypervisor globalhypervisortype - + @@ -1659,7 +1659,7 @@ under the License. zoneid globalzoneid - + account accountname @@ -1671,7 +1671,7 @@ under the License. hypervisor globalhypervisortype - + @@ -1687,12 +1687,12 @@ under the License. deployVirtualMachine Deploy VM3 Failure Avoidance test case true - + diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -1704,11 +1704,11 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 @@ -1724,7 +1724,7 @@ under the License. networkids networkid - + @@ -1733,37 +1733,37 @@ under the License. - + - associateIpAddress + associateIpAddress Associate Second IP Failure Avoidance Test Case true - + zoneid globalzoneid - + account accountname - + domainid 1 - + - + ipaddress ipid - - + + @@ -1771,7 +1771,7 @@ under the License. createVolume Create Seventh Volume Failure Avoidance Test Case true - + name @@ -1780,31 +1780,31 @@ under the License. zoneid globalzoneid - + account accountname - + domainid 1 - + diskofferingid globaldiskofferingid - + - + id volid3 - - - + + + createSnapshot Create Third Snapshot Failure Test Case @@ -1855,7 +1855,7 @@ under the License. zoneid globalzoneid - + account accountname @@ -1867,8 +1867,8 @@ under the License. hypervisor globalhypervisortype - - + + @@ -1882,13 +1882,13 @@ under the License. 
deleteAccount Clean up admin account test case - + id accountid - + diff --git a/test/metadata/func/roughflatstress.xml b/test/metadata/func/roughflatstress.xml index 841190c496f..291d434348a 100644 --- a/test/metadata/func/roughflatstress.xml +++ b/test/metadata/func/roughflatstress.xml @@ -17,7 +17,7 @@ specific language governing permissions and limitations under the License. --> - + createAccount [Create the Account and a User for the account] @@ -66,7 +66,7 @@ under the License. - + listUsers [List the User of the account] @@ -99,7 +99,7 @@ under the License. - + registerUserKeys Registering the First User @@ -120,7 +120,7 @@ under the License. - + createSecurityGroup true @@ -149,7 +149,7 @@ under the License. - + authorizeSecurityGroupIngress true @@ -185,7 +185,7 @@ under the License. --> - + listSecurityGroups true @@ -211,7 +211,7 @@ under the License. - + deployVirtualMachine true @@ -244,7 +244,7 @@ under the License. domainid globaldomainid - + --> @@ -254,10 +254,10 @@ under the License. ipaddress getvmip1 - + - + sleep.sh @@ -268,8 +268,8 @@ under the License. 120 - - + + ssh.sh @@ -285,7 +285,7 @@ under the License. - + sleep.sh @@ -296,8 +296,8 @@ under the License. 60 - - + + revokeSecurityGroupIngress true @@ -309,7 +309,7 @@ under the License. - +--> - + createSecurityGroup true @@ -358,7 +358,7 @@ under the License. - + authorizeSecurityGroupIngress true @@ -375,7 +375,7 @@ under the License. usersecuritygrouplist[0].group getsecuritygroupname1 - + endport 22 @@ -398,8 +398,8 @@ under the License. --> - - + + authorizeSecurityGroupIngress true @@ -435,7 +435,7 @@ under the License. --> - + listSecurityGroups true @@ -461,7 +461,7 @@ under the License. - + deployVirtualMachine true @@ -494,7 +494,7 @@ under the License. domainid globaldomainid - + --> @@ -504,10 +504,10 @@ under the License. ipaddress getvmip2 - + - + sleep.sh @@ -518,8 +518,8 @@ under the License. 120 - - + + ssh.sh @@ -535,7 +535,7 @@ under the License. 
- + sleep.sh @@ -546,8 +546,8 @@ under the License. 60 - - + + revokeSecurityGroupIngress true @@ -559,7 +559,7 @@ under the License. - + listSecurityGroups true @@ -585,7 +585,7 @@ under the License. - + revokeSecurityGroupIngress true @@ -597,8 +597,8 @@ under the License. - - +--> destroyVirtualMachine true @@ -627,7 +627,7 @@ under the License. - + sleep.sh @@ -651,7 +651,7 @@ under the License. - + sleep.sh @@ -663,9 +663,9 @@ under the License. - - - + + + deleteUser Deleting the user @@ -675,8 +675,8 @@ under the License. getuserid1 - - + + deleteAccount Delete Account @@ -686,6 +686,6 @@ under the License. getaccountid - - + + diff --git a/test/metadata/func/roughregression.xml b/test/metadata/func/roughregression.xml index f6de2323557..64c120a8bd4 100644 --- a/test/metadata/func/roughregression.xml +++ b/test/metadata/func/roughregression.xml @@ -199,7 +199,7 @@ under the License. domain getdomainname1 - + @@ -243,7 +243,7 @@ under the License. - + @@ -380,7 +380,7 @@ under the License. - + createUser @@ -417,7 +417,7 @@ under the License. domainid globaldomainid - + @@ -794,7 +794,7 @@ under the License. state locked - + @@ -810,7 +810,7 @@ under the License. domainid globaldomainid - + @@ -912,7 +912,7 @@ under the License. state enabled - + @@ -974,7 +974,7 @@ under the License. vmavailable Unlimited - + ipavailable Unlimited @@ -1034,7 +1034,7 @@ under the License. state disabled - + @@ -1050,7 +1050,7 @@ under the License. domainid globaldomainid - + @@ -1152,7 +1152,7 @@ under the License. state enabled - + @@ -1189,7 +1189,7 @@ under the License. getdomainname2 - + id @@ -1261,8 +1261,8 @@ under the License. false - - + + @@ -1274,7 +1274,7 @@ under the License. globaldomainid - + domain 0 @@ -1304,7 +1304,7 @@ under the License. - + deleteDomain @@ -1319,7 +1319,7 @@ under the License. true - + success @@ -1342,7 +1342,7 @@ under the License. 1 - + template 0 @@ -1396,7 +1396,7 @@ under the License. 
size - debian50templatesize + debian50templatesize account @@ -1429,7 +1429,7 @@ under the License. domainid 1 - + @@ -1636,7 +1636,7 @@ under the License. offerha false - + @@ -1672,7 +1672,7 @@ under the License. vlan - guestcidraddress + guestcidraddress networktype @@ -1680,7 +1680,7 @@ under the License. - + @@ -1770,7 +1770,7 @@ under the License. - + deployVirtualMachine [2] deployVirtualMachine-TestCase @@ -1814,7 +1814,7 @@ under the License. domainid globaldomainid - + @@ -1823,7 +1823,7 @@ under the License. - + updateVirtualMachine [3] updateVirtualMachine-TestCase @@ -1850,9 +1850,9 @@ under the License. - + - + associateIpAddress [4] associateIpAddress-TestCase @@ -1881,7 +1881,7 @@ under the License. - + enableStaticNat [5] enableStaticNat-TestCase @@ -1896,7 +1896,7 @@ under the License. - + createIpForwardingRule [6] createIpForwardingRule-TestCase @@ -1922,10 +1922,10 @@ under the License. id getipfwdruleid1 - + - + sleep.sh @@ -1936,8 +1936,8 @@ under the License. 120 - - + + ssh.sh @@ -1953,7 +1953,7 @@ under the License. - + deleteIpForwardingRule [7] deleteIpForwardingRule-TestCase @@ -1964,7 +1964,7 @@ under the License. - + disableStaticNat [8] disableStaticNat-TestCase @@ -1975,7 +1975,7 @@ under the License. - + disassociateIpAddress [9] disassociateIpAddress-TestCase @@ -1986,7 +1986,7 @@ under the License. - + associateIpAddress [10] associateIpAddress-TestCase @@ -2015,7 +2015,7 @@ under the License. - + createPortForwardingRule [11] createPortForwardingRule-TestCase @@ -2058,7 +2058,7 @@ under the License. id getportfwdruleid1 - + @@ -2073,8 +2073,8 @@ under the License. 120 - - + + ssh.sh @@ -2090,7 +2090,7 @@ under the License. - + deletePortForwardingRule [12] deletePortForwardingRule-TestCase @@ -2101,7 +2101,7 @@ under the License. - + disassociateIpAddress [13] disassociateIpAddress-TestCase @@ -2112,7 +2112,7 @@ under the License. - + associateIpAddress [14] associateIpAddress-TestCase @@ -2141,7 +2141,7 @@ under the License. 
- + createLoadBalancerRule [15] createLoadBalancerRule-TestCase @@ -2174,7 +2174,7 @@ under the License. - + assignToLoadBalancerRule [16] assignToLoadBalancerRule-TestCase @@ -2189,7 +2189,7 @@ under the License. - + listVolumes @@ -2384,7 +2384,7 @@ under the License. - + createSnapshot [26] createSnapshotofROOTVolume-TestCase @@ -2409,7 +2409,7 @@ under the License. - + listSnapshots [27] listROOTVolumeSnapshot-TestCase @@ -2420,7 +2420,7 @@ under the License. - + listVolumes [26] listDATADISKVolumes-TestCase @@ -2466,7 +2466,7 @@ under the License. - + listSnapshots [29] listDATADISKVolumeSnapshot-TestCase @@ -2477,7 +2477,7 @@ under the License. - + createTemplate [30] createTemplateFromSnapshotTakenofROOTVolume-TestCase @@ -2518,7 +2518,7 @@ under the License. - + createTemplate [31] createTemplateFromSnapshotTakenofDATADISKVolume-TestCase @@ -2559,7 +2559,7 @@ under the License. - + createVolume [32] createVolumeFromSnapshotTakenofROOTVolume-TestCase @@ -2588,7 +2588,7 @@ under the License. - + createVolume [33] createVolumeFromSnapshotTakenofDATADISKVolume-TestCase @@ -2618,7 +2618,7 @@ under the License. - + attachVolume [34] attachVolume1-TestCase @@ -2633,7 +2633,7 @@ under the License. - + attachVolume [35] attachVolume2-TestCase @@ -2648,9 +2648,9 @@ under the License. - + - + detachVolume [36] detachVolume1-TestCase @@ -2661,7 +2661,7 @@ under the License. - + detachVolume [37] detachVolume2-TestCase @@ -2672,7 +2672,7 @@ under the License. - + attachVolume [34] attachVolume1-TestCase @@ -2687,7 +2687,7 @@ under the License. - + attachVolume [35] attachVolume2-TestCase @@ -2702,7 +2702,7 @@ under the License. - + stopVirtualMachine [40] stopVirtualMachine-TestCase @@ -2713,7 +2713,7 @@ under the License. - + updateVirtualMachine [41] updateVirtualMachine-TestCase @@ -2740,7 +2740,7 @@ under the License. - + detachVolume [36] detachVolume1-TestCase @@ -2751,7 +2751,7 @@ under the License. 
- + detachVolume [37] detachVolume2-TestCase @@ -2762,7 +2762,7 @@ under the License. - + createTemplate [44] createTemplateFromROOTVolume-TestCase @@ -2799,7 +2799,7 @@ under the License. - + startVirtualMachine [45] startVirtualMachine-TestCase @@ -2810,7 +2810,7 @@ under the License. - + rebootVirtualMachine [46] rebootVirtualMachine-TestCase @@ -2821,7 +2821,7 @@ under the License. - + createSnapshotPolicy [47] createHOURLYSnapshotPolicyForRootVolume-TestCase @@ -2848,7 +2848,7 @@ under the License. - + createSnapshotPolicy [48] createDAILYSnapshotPolicy-TestCase @@ -2875,7 +2875,7 @@ under the License. - + createSnapshotPolicy [49] createWEEKLYSnapshotPolicy-TestCase @@ -2902,7 +2902,7 @@ under the License. - + createSnapshotPolicy [50] createMONTHLYSnapshotPolicy-TestCase @@ -2935,7 +2935,7 @@ under the License. - + deleteSnapshotPolicies [51] deleteMONTHLYSnapshotPolicies-TestCase @@ -2946,7 +2946,7 @@ under the License. - + deleteVolume [52] deleteVolume1-TestCase @@ -2957,7 +2957,7 @@ under the License. - + deleteVolume [53] deleteVolume2-TestCase @@ -2970,7 +2970,7 @@ under the License. - + destroyVirtualMachine [54] destroyVirtualMachine-TestCase @@ -2981,7 +2981,7 @@ under the License. - + recoverVirtualMachine [55] recoverVirtualMachine-TestCase @@ -2992,7 +2992,7 @@ under the License. - + startVirtualMachine [56] startVirtualMachine-TestCase @@ -3003,7 +3003,7 @@ under the License. - + rebootVirtualMachine [57] rebootVirtualMachine-TestCase @@ -3014,7 +3014,7 @@ under the License. - + stopVirtualMachine [58] stopVirtualMachine-TestCase @@ -3025,7 +3025,7 @@ under the License. - + destroyVirtualMachine [59] destroyVirtualMachine-TestCase @@ -3036,7 +3036,7 @@ under the License. - + recoverVirtualMachine [60] recoverVirtualMachine-TestCase @@ -3047,7 +3047,7 @@ under the License. - + changeServiceForVirtualMachine [61] changeServiceForVirtualMachine-TestCase @@ -3062,7 +3062,7 @@ under the License. 
- + startVirtualMachine [62] startVirtualMachine-TestCase @@ -3073,7 +3073,7 @@ under the License. - + rebootVirtualMachine [63] rebootVirtualMachine-TestCase @@ -3084,7 +3084,7 @@ under the License. - + stopVirtualMachine [64] stopVirtualMachine-TestCase @@ -3095,7 +3095,7 @@ under the License. - + destroyVirtualMachine [65] destroyVirtualMachine-TestCase @@ -3106,7 +3106,7 @@ under the License. - + deployVirtualMachine [66] deployVirtualMachineFromTemplateCreatedFromSnapshotofROOTVolume-TestCase @@ -3166,7 +3166,7 @@ under the License. - + deleteTemplate [68] deleteTemplateofSnapshotTakenofDATAVolume-TestCase @@ -3181,7 +3181,7 @@ under the License. - + deleteTemplate [69] deleteTemplateCreatedFromROOTVolume-TestCase @@ -3196,7 +3196,7 @@ under the License. - + deleteSnapshot [70] deleteSnapshot-TestCase @@ -3207,7 +3207,7 @@ under the License. - + deleteSnapshot [71] deleteSnapshot-TestCase @@ -3248,9 +3248,9 @@ under the License. - + - + associateIpAddress [4] associateIpAddress-TestCase @@ -3279,7 +3279,7 @@ under the License. - + enableStaticNat [5] enableStaticNat-TestCase @@ -3294,7 +3294,7 @@ under the License. - + createIpForwardingRule [6] createIpForwardingRule-TestCase @@ -3320,10 +3320,10 @@ under the License. id getipfwdruleid1 - + - + sleep.sh @@ -3334,8 +3334,8 @@ under the License. 120 - - + + ssh.sh @@ -3351,7 +3351,7 @@ under the License. - + deleteIpForwardingRule [7] deleteIpForwardingRule-TestCase @@ -3362,7 +3362,7 @@ under the License. - + disableStaticNat [8] disableStaticNat-TestCase @@ -3373,7 +3373,7 @@ under the License. - + disassociateIpAddress [9] disassociateIpAddress-TestCase @@ -3384,7 +3384,7 @@ under the License. - + associateIpAddress [10] associateIpAddress-TestCase @@ -3413,7 +3413,7 @@ under the License. - + createPortForwardingRule [11] createPortForwardingRule-TestCase @@ -3456,7 +3456,7 @@ under the License. id getportfwdruleid1 - + @@ -3471,8 +3471,8 @@ under the License. 
120 - - + + ssh.sh @@ -3488,7 +3488,7 @@ under the License. - + deletePortForwardingRule [12] deletePortForwardingRule-TestCase @@ -3499,7 +3499,7 @@ under the License. - + disassociateIpAddress [13] disassociateIpAddress-TestCase @@ -3510,7 +3510,7 @@ under the License. - + associateIpAddress [14] associateIpAddress-TestCase @@ -3539,7 +3539,7 @@ under the License. - + createLoadBalancerRule [15] createLoadBalancerRule-TestCase @@ -3572,7 +3572,7 @@ under the License. - + assignToLoadBalancerRule [16] assignToLoadBalancerRule-TestCase @@ -3587,7 +3587,7 @@ under the License. - + +--> deleteLoadBalancerRule [24] deleteLoadBalancerRule-TestCase @@ -3745,10 +3745,10 @@ under the License. id getldblcrule1 - + - + disassociateIpAddress [25] disassociateIpAddress-TestCase @@ -3759,7 +3759,7 @@ under the License. - + listVolumes @@ -3781,7 +3781,7 @@ under the License. - + createSnapshot [26] createSnapshotofROOTVolume-TestCase @@ -3806,7 +3806,7 @@ under the License. - + listSnapshots [27] listROOTVolumeSnapshot-TestCase @@ -3817,7 +3817,7 @@ under the License. - + listVolumes [26] listDATADISKVolumes-TestCase @@ -3863,7 +3863,7 @@ under the License. - + listSnapshots [29] listDATADISKVolumeSnapshot-TestCase @@ -3874,7 +3874,7 @@ under the License. - + createTemplate [30] createTemplateFromSnapshotTakenofROOTVolume-TestCase @@ -3915,7 +3915,7 @@ under the License. - + createTemplate [31] createTemplateFromSnapshotTakenofDATADISKVolume-TestCase @@ -3956,7 +3956,7 @@ under the License. - + createVolume [32] createVolumeFromSnapshotTakenofROOTVolume-TestCase @@ -3985,7 +3985,7 @@ under the License. - + createVolume [33] createVolumeFromSnapshotTakenofDATADISKVolume-TestCase @@ -4015,7 +4015,7 @@ under the License. - + attachVolume [34] attachVolume1-TestCase @@ -4030,7 +4030,7 @@ under the License. - + attachVolume [35] attachVolume2-TestCase @@ -4045,9 +4045,9 @@ under the License. - + - + detachVolume [36] detachVolume1-TestCase @@ -4058,7 +4058,7 @@ under the License. 
- + detachVolume [37] detachVolume2-TestCase @@ -4069,7 +4069,7 @@ under the License. - + attachVolume [34] attachVolume1-TestCase @@ -4084,7 +4084,7 @@ under the License. - + attachVolume [35] attachVolume2-TestCase @@ -4099,7 +4099,7 @@ under the License. - + stopVirtualMachine [40] stopVirtualMachine-TestCase @@ -4110,7 +4110,7 @@ under the License. - + updateVirtualMachine [41] updateVirtualMachine-TestCase @@ -4137,7 +4137,7 @@ under the License. - + detachVolume [36] detachVolume1-TestCase @@ -4148,7 +4148,7 @@ under the License. - + detachVolume [37] detachVolume2-TestCase @@ -4159,7 +4159,7 @@ under the License. - + createTemplate [44] createTemplateFromROOTVolume-TestCase @@ -4196,7 +4196,7 @@ under the License. - + startVirtualMachine [45] startVirtualMachine-TestCase @@ -4207,7 +4207,7 @@ under the License. - + rebootVirtualMachine [46] rebootVirtualMachine-TestCase @@ -4218,7 +4218,7 @@ under the License. - + createSnapshotPolicy [47] createHOURLYSnapshotPolicyForRootVolume-TestCase @@ -4245,7 +4245,7 @@ under the License. - + createSnapshotPolicy [48] createDAILYSnapshotPolicy-TestCase @@ -4272,7 +4272,7 @@ under the License. - + createSnapshotPolicy [49] createWEEKLYSnapshotPolicy-TestCase @@ -4299,7 +4299,7 @@ under the License. - + createSnapshotPolicy [50] createMONTHLYSnapshotPolicy-TestCase @@ -4332,7 +4332,7 @@ under the License. - + deleteSnapshotPolicies [51] deleteMONTHLYSnapshotPolicies-TestCase @@ -4343,7 +4343,7 @@ under the License. - + deleteVolume [52] deleteVolume1-TestCase @@ -4354,7 +4354,7 @@ under the License. - + deleteVolume [53] deleteVolume2-TestCase @@ -4367,7 +4367,7 @@ under the License. - + destroyVirtualMachine [54] destroyVirtualMachine-TestCase @@ -4378,7 +4378,7 @@ under the License. - + recoverVirtualMachine [55] recoverVirtualMachine-TestCase @@ -4389,7 +4389,7 @@ under the License. - + startVirtualMachine [56] startVirtualMachine-TestCase @@ -4400,7 +4400,7 @@ under the License. 
- + rebootVirtualMachine [57] rebootVirtualMachine-TestCase @@ -4411,7 +4411,7 @@ under the License. - + stopVirtualMachine [58] stopVirtualMachine-TestCase @@ -4422,7 +4422,7 @@ under the License. - + destroyVirtualMachine [59] destroyVirtualMachine-TestCase @@ -4433,7 +4433,7 @@ under the License. - + recoverVirtualMachine [60] recoverVirtualMachine-TestCase @@ -4444,7 +4444,7 @@ under the License. - + changeServiceForVirtualMachine [61] changeServiceForVirtualMachine-TestCase @@ -4459,7 +4459,7 @@ under the License. - + startVirtualMachine [62] startVirtualMachine-TestCase @@ -4470,7 +4470,7 @@ under the License. - + rebootVirtualMachine [63] rebootVirtualMachine-TestCase @@ -4481,7 +4481,7 @@ under the License. - + stopVirtualMachine [64] stopVirtualMachine-TestCase @@ -4492,7 +4492,7 @@ under the License. - + destroyVirtualMachine [65] destroyVirtualMachine-TestCase @@ -4503,7 +4503,7 @@ under the License. - + @@ -4511,7 +4511,7 @@ under the License. - + - + associateIpAddress [4] associateIpAddress-TestCase @@ -4623,7 +4623,7 @@ under the License. - + enableStaticNat [5] enableStaticNat-TestCase @@ -4638,7 +4638,7 @@ under the License. - + createIpForwardingRule [6] createIpForwardingRule-TestCase @@ -4664,10 +4664,10 @@ under the License. id getipfwdruleid1 - + - + sleep.sh @@ -4678,8 +4678,8 @@ under the License. 120 - - + + ssh.sh @@ -4695,7 +4695,7 @@ under the License. - + deleteIpForwardingRule [7] deleteIpForwardingRule-TestCase @@ -4706,7 +4706,7 @@ under the License. - + disableStaticNat [8] disableStaticNat-TestCase @@ -4717,7 +4717,7 @@ under the License. - + disassociateIpAddress [9] disassociateIpAddress-TestCase @@ -4728,7 +4728,7 @@ under the License. - + associateIpAddress [10] associateIpAddress-TestCase @@ -4757,7 +4757,7 @@ under the License. - + createPortForwardingRule [11] createPortForwardingRule-TestCase @@ -4800,7 +4800,7 @@ under the License. id getportfwdruleid1 - + @@ -4815,8 +4815,8 @@ under the License. 
120 - - + + ssh.sh @@ -4832,7 +4832,7 @@ under the License. - + deletePortForwardingRule [12] deletePortForwardingRule-TestCase @@ -4843,7 +4843,7 @@ under the License. - + disassociateIpAddress [13] disassociateIpAddress-TestCase @@ -4854,7 +4854,7 @@ under the License. - + associateIpAddress [14] associateIpAddress-TestCase @@ -4883,7 +4883,7 @@ under the License. - + createLoadBalancerRule [15] createLoadBalancerRule-TestCase @@ -4916,7 +4916,7 @@ under the License. - + assignToLoadBalancerRule [16] assignToLoadBalancerRule-TestCase @@ -4931,7 +4931,7 @@ under the License. - + listVolumes @@ -5126,7 +5126,7 @@ under the License. - + createSnapshot [26] createSnapshotofROOTVolume-TestCase @@ -5151,7 +5151,7 @@ under the License. - + listSnapshots [27] listROOTVolumeSnapshot-TestCase @@ -5162,7 +5162,7 @@ under the License. - + listVolumes [26] listDATADISKVolumes-TestCase @@ -5208,7 +5208,7 @@ under the License. - + listSnapshots [29] listDATADISKVolumeSnapshot-TestCase @@ -5219,7 +5219,7 @@ under the License. - + createTemplate [30] createTemplateFromSnapshotTakenofROOTVolume-TestCase @@ -5260,7 +5260,7 @@ under the License. - + createTemplate [31] createTemplateFromSnapshotTakenofDATADISKVolume-TestCase @@ -5301,7 +5301,7 @@ under the License. - + createVolume [32] createVolumeFromSnapshotTakenofROOTVolume-TestCase @@ -5330,7 +5330,7 @@ under the License. - + createVolume [33] createVolumeFromSnapshotTakenofDATADISKVolume-TestCase @@ -5360,7 +5360,7 @@ under the License. - + attachVolume [34] attachVolume1-TestCase @@ -5375,7 +5375,7 @@ under the License. - + attachVolume [35] attachVolume2-TestCase @@ -5390,9 +5390,9 @@ under the License. - + - + detachVolume [36] detachVolume1-TestCase @@ -5403,7 +5403,7 @@ under the License. - + detachVolume [37] detachVolume2-TestCase @@ -5414,7 +5414,7 @@ under the License. - + attachVolume [34] attachVolume1-TestCase @@ -5429,7 +5429,7 @@ under the License. 
- + attachVolume [35] attachVolume2-TestCase @@ -5444,7 +5444,7 @@ under the License. - + stopVirtualMachine [40] stopVirtualMachine-TestCase @@ -5455,7 +5455,7 @@ under the License. - + updateVirtualMachine [41] updateVirtualMachine-TestCase @@ -5482,7 +5482,7 @@ under the License. - + detachVolume [36] detachVolume1-TestCase @@ -5493,7 +5493,7 @@ under the License. - + detachVolume [37] detachVolume2-TestCase @@ -5504,7 +5504,7 @@ under the License. - + createTemplate [44] createTemplateFromROOTVolume-TestCase @@ -5541,7 +5541,7 @@ under the License. - + startVirtualMachine [45] startVirtualMachine-TestCase @@ -5552,7 +5552,7 @@ under the License. - + rebootVirtualMachine [46] rebootVirtualMachine-TestCase @@ -5563,7 +5563,7 @@ under the License. - + createSnapshotPolicy [47] createHOURLYSnapshotPolicyForRootVolume-TestCase @@ -5590,7 +5590,7 @@ under the License. - + createSnapshotPolicy [48] createDAILYSnapshotPolicy-TestCase @@ -5617,7 +5617,7 @@ under the License. - + createSnapshotPolicy [49] createWEEKLYSnapshotPolicy-TestCase @@ -5644,7 +5644,7 @@ under the License. - + createSnapshotPolicy [50] createMONTHLYSnapshotPolicy-TestCase @@ -5677,7 +5677,7 @@ under the License. - + deleteSnapshotPolicies [51] deleteMONTHLYSnapshotPolicies-TestCase @@ -5688,7 +5688,7 @@ under the License. - + deleteVolume [52] deleteVolume1-TestCase @@ -5699,7 +5699,7 @@ under the License. - + deleteVolume [53] deleteVolume2-TestCase @@ -5712,7 +5712,7 @@ under the License. - + destroyVirtualMachine [54] destroyVirtualMachine-TestCase @@ -5723,7 +5723,7 @@ under the License. - + recoverVirtualMachine [55] recoverVirtualMachine-TestCase @@ -5734,7 +5734,7 @@ under the License. - + startVirtualMachine [56] startVirtualMachine-TestCase @@ -5745,7 +5745,7 @@ under the License. - + rebootVirtualMachine [57] rebootVirtualMachine-TestCase @@ -5756,7 +5756,7 @@ under the License. - + stopVirtualMachine [58] stopVirtualMachine-TestCase @@ -5767,7 +5767,7 @@ under the License. 
- + destroyVirtualMachine [59] destroyVirtualMachine-TestCase @@ -5778,7 +5778,7 @@ under the License. - + recoverVirtualMachine [60] recoverVirtualMachine-TestCase @@ -5789,7 +5789,7 @@ under the License. - + changeServiceForVirtualMachine [61] changeServiceForVirtualMachine-TestCase @@ -5804,7 +5804,7 @@ under the License. - + startVirtualMachine [62] startVirtualMachine-TestCase @@ -5815,7 +5815,7 @@ under the License. - + rebootVirtualMachine [63] rebootVirtualMachine-TestCase @@ -5826,7 +5826,7 @@ under the License. - + stopVirtualMachine [64] stopVirtualMachine-TestCase @@ -5837,7 +5837,7 @@ under the License. - + destroyVirtualMachine [65] destroyVirtualMachine-TestCase @@ -5848,7 +5848,7 @@ under the License. - + deployVirtualMachine [66] deployVirtualMachineFromTemplateCreatedFromSnapshotofROOTVolume-TestCase @@ -5908,7 +5908,7 @@ under the License. - + deleteTemplate [68] deleteTemplateofSnapshotTakenofDATAVolume-TestCase @@ -5923,7 +5923,7 @@ under the License. - + deleteTemplate [69] deleteTemplateCreatedFromROOTVolume-TestCase @@ -5938,7 +5938,7 @@ under the License. - + deleteSnapshot [70] deleteSnapshot-TestCase @@ -5949,7 +5949,7 @@ under the License. - + deleteSnapshot [71] deleteSnapshot-TestCase @@ -6008,5 +6008,5 @@ under the License. getaccountid - - + + diff --git a/test/metadata/func/sanity.xml b/test/metadata/func/sanity.xml index 3094abd136d..6316ed6d2ed 100644 --- a/test/metadata/func/sanity.xml +++ b/test/metadata/func/sanity.xml @@ -18,7 +18,7 @@ under the License. --> - @@ -84,7 +84,7 @@ under the License. - + createNetwork @@ -122,7 +122,7 @@ under the License. - + deployVirtualMachine [Deploy a Virtual Machine under the Account] @@ -167,8 +167,8 @@ under the License. - - + + associateIpAddress [Acquire an IP Address (Non-Source NAT*)] @@ -190,14 +190,14 @@ under the License. 
id nonsourcenatpublicipid - + ipaddress nonsourcenatpublicip - + disassociateIpAddress [Release the IP Address (Non-Source NAT*)] @@ -208,7 +208,7 @@ under the License. - + rebootVirtualMachine [Reboot Virtual Machine] @@ -219,7 +219,7 @@ under the License. - + stopVirtualMachine [Stop virtual machine] @@ -230,7 +230,7 @@ under the License. - + startVirtualMachine [Start virtual machine] @@ -247,8 +247,8 @@ under the License. - - + + listRouters [List Routers] @@ -272,9 +272,9 @@ under the License. routerid - - - + + + stopRouter [Stop router] @@ -313,7 +313,7 @@ under the License. - + listVirtualMachines [Verify that vm was destroyed] @@ -330,7 +330,7 @@ under the License. - + recoverVirtualMachine [Recover virtual machine] @@ -341,7 +341,7 @@ under the License. - + listVirtualMachines [Verify that machine was recovered] @@ -362,7 +362,7 @@ under the License. - + deleteAccount [Delete account] @@ -372,7 +372,7 @@ under the License. accountid - - - + + + diff --git a/test/metadata/func/securitygroups.xml b/test/metadata/func/securitygroups.xml index 3ecffbf8467..55fc32aa270 100644 --- a/test/metadata/func/securitygroups.xml +++ b/test/metadata/func/securitygroups.xml @@ -17,7 +17,7 @@ specific language governing permissions and limitations under the License. --> - @@ -78,7 +78,7 @@ under the License. - + registerUserKeys Registering the User @@ -99,7 +99,7 @@ under the License. - + createNetworkGroup true @@ -113,7 +113,7 @@ under the License. - + authorizeNetworkGroupIngress Add the First Ingress Rule @@ -148,7 +148,7 @@ under the License. - + deployVirtualMachine true @@ -182,7 +182,7 @@ under the License. - + sleep.sh @@ -193,8 +193,8 @@ under the License. 120 - - + + ssh.sh @@ -210,7 +210,7 @@ under the License. - + sleep.sh @@ -221,8 +221,8 @@ under the License. 60 - - + + stopVirtualMachine true @@ -234,7 +234,7 @@ under the License. - + startVirtualMachine true @@ -246,7 +246,7 @@ under the License. 
- + rebootVirtualMachine true @@ -293,7 +293,7 @@ under the License. - + ssh.sh @@ -310,9 +310,9 @@ under the License. - + - + authorizeNetworkGroupIngress Add the Second Ingress Rule @@ -347,7 +347,7 @@ under the License. - + deployVirtualMachine true @@ -381,7 +381,7 @@ under the License. - + sleep.sh @@ -392,8 +392,8 @@ under the License. 120 - - + + ssh.sh @@ -410,7 +410,7 @@ under the License. - + revokeNetworkGroupIngress Revoke The Second Ingress Rule @@ -447,7 +447,7 @@ under the License. - + authorizeNetworkGroupIngress Add the Third Ingress Rule @@ -482,7 +482,7 @@ under the License. - + deployVirtualMachine true @@ -516,7 +516,7 @@ under the License. - + sleep.sh @@ -527,8 +527,8 @@ under the License. 120 - - + + ssh.sh @@ -545,7 +545,7 @@ under the License. - + revokeNetworkGroupIngress Revoke The Third Ingress Rule @@ -580,9 +580,9 @@ under the License. - + - + authorizeNetworkGroupIngress Add the Fourth Ingress Rule @@ -617,7 +617,7 @@ under the License. - + deployVirtualMachine true @@ -651,7 +651,7 @@ under the License. - + sleep.sh @@ -662,8 +662,8 @@ under the License. 120 - - + + ssh.sh @@ -680,7 +680,7 @@ under the License. - + revokeNetworkGroupIngress Revoke The Fourth Ingress Rule @@ -715,8 +715,8 @@ under the License. - - + + deleteNetworkGroup @@ -729,7 +729,7 @@ under the License. - + destroyVirtualMachine true @@ -741,7 +741,7 @@ under the License. - + sleep.sh @@ -752,10 +752,10 @@ under the License. 120 - - - - + + + + deleteUser Deleting the user @@ -765,6 +765,6 @@ under the License. userid - + diff --git a/test/metadata/func/sharedstorage_volume_test.xml b/test/metadata/func/sharedstorage_volume_test.xml index c34afc47baf..697887998ec 100644 --- a/test/metadata/func/sharedstorage_volume_test.xml +++ b/test/metadata/func/sharedstorage_volume_test.xml @@ -78,12 +78,12 @@ under the License. account accountvar - + id userid - + @@ -126,7 +126,7 @@ under the License. ---> +--> registerUserKeys @@ -145,7 +145,7 @@ under the License. 
secretkey - + createNetwork @@ -200,7 +200,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -208,7 +208,7 @@ under the License. account accountvar - + domainid 1 @@ -224,7 +224,7 @@ under the License. networkids networkid - + @@ -233,7 +233,7 @@ under the License. - + createVolume true @@ -248,7 +248,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -257,7 +257,7 @@ under the License. - + attachVolume true @@ -273,7 +273,7 @@ under the License. - + @@ -286,7 +286,7 @@ under the License. - + createVolume true @@ -301,7 +301,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -310,7 +310,7 @@ under the License. - + attachVolume true @@ -326,8 +326,8 @@ under the License. - - + + createVolume @@ -344,7 +344,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -352,9 +352,9 @@ under the License. DATADISK - - - + + + deployVirtualMachine @@ -371,7 +371,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -379,7 +379,7 @@ under the License. account accountvar - + domainid 1 @@ -395,7 +395,7 @@ under the License. networkids networkid - + @@ -404,7 +404,7 @@ under the License. - + listVolumes @@ -424,7 +424,7 @@ under the License. - + detachVolume true @@ -458,7 +458,7 @@ under the License. - + detachVolume true @@ -471,8 +471,8 @@ under the License. - - + + deployVirtualMachine @@ -489,7 +489,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -497,7 +497,7 @@ under the License. account accountvar - + domainid 1 @@ -513,7 +513,7 @@ under the License. networkids networkid - + @@ -522,7 +522,7 @@ under the License. - + createVolume true @@ -537,7 +537,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -546,7 +546,7 @@ under the License. - + createVolume true @@ -561,7 +561,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -570,7 +570,7 @@ under the License. 
- + createVolume true @@ -585,7 +585,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -594,7 +594,7 @@ under the License. - + createVolume true @@ -609,7 +609,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -618,7 +618,7 @@ under the License. - + createVolume true @@ -633,7 +633,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -642,7 +642,7 @@ under the License. - + attachVolume true @@ -657,7 +657,7 @@ under the License. - + attachVolume true @@ -672,7 +672,7 @@ under the License. - + attachVolume true @@ -687,7 +687,7 @@ under the License. - + attachVolume true @@ -702,7 +702,7 @@ under the License. - + attachVolume true @@ -732,7 +732,7 @@ under the License. - + stopVirtualMachine @@ -745,7 +745,7 @@ under the License. - + startVirtualMachine true @@ -757,8 +757,8 @@ under the License. - - + + createVolume @@ -774,7 +774,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -783,7 +783,7 @@ under the License. - + attachVolume true @@ -800,7 +800,7 @@ under the License. - + @@ -813,8 +813,8 @@ under the License. vmid - - + + listVolumes true @@ -830,8 +830,8 @@ under the License. - - + + deployVirtualMachine @@ -848,7 +848,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -856,7 +856,7 @@ under the License. account accountvar - + domainid 1 @@ -872,7 +872,7 @@ under the License. networkids networkid - + @@ -881,7 +881,7 @@ under the License. - + listVolumes @@ -901,7 +901,7 @@ under the License. - + detachVolume true @@ -912,7 +912,7 @@ under the License. - + rebootVirtualMachine true @@ -924,7 +924,7 @@ under the License. - + stopVirtualMachine @@ -936,7 +936,7 @@ under the License. - + startVirtualMachine true @@ -967,7 +967,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -975,7 +975,7 @@ under the License. account accountvar - + domainid 1 @@ -991,7 +991,7 @@ under the License. 
networkids networkid - + @@ -1000,7 +1000,7 @@ under the License. - + listVolumes @@ -1020,7 +1020,7 @@ under the License. - + detachVolume true @@ -1032,7 +1032,7 @@ under the License. - + listVolumes true @@ -1048,7 +1048,7 @@ under the License. - + attachVolume @@ -1064,7 +1064,7 @@ under the License. - + stopVirtualMachine true @@ -1075,7 +1075,7 @@ under the License. - + detachVolume true @@ -1087,7 +1087,7 @@ under the License. - + listVolumes true @@ -1103,7 +1103,7 @@ under the License. - + deployVirtualMachine @@ -1120,7 +1120,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -1128,7 +1128,7 @@ under the License. account accountvar - + domainid 1 @@ -1144,7 +1144,7 @@ under the License. networkids networkid - + @@ -1153,7 +1153,7 @@ under the License. - + deployVirtualMachine true @@ -1169,7 +1169,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -1177,7 +1177,7 @@ under the License. account accountvar - + domainid 1 @@ -1193,7 +1193,7 @@ under the License. networkids networkid - + @@ -1202,7 +1202,7 @@ under the License. - + createVolume true @@ -1217,7 +1217,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -1226,7 +1226,7 @@ under the License. - + attachVolume true @@ -1241,7 +1241,7 @@ under the License. - + attachVolume true @@ -1258,7 +1258,7 @@ under the License. - + destroyVirtualMachine @@ -1270,7 +1270,7 @@ under the License. - + createVolume true @@ -1285,7 +1285,7 @@ under the License. diskofferingid globaldiskofferingid - + @@ -1294,7 +1294,7 @@ under the License. - + attachVolume true @@ -1311,7 +1311,7 @@ under the License. - + listVolumes @@ -1332,7 +1332,7 @@ under the License. - + destroyVirtualMachine true @@ -1343,7 +1343,7 @@ under the License. - + detachVolume true @@ -1356,9 +1356,9 @@ under the License. 
- - - + + + deleteAccount Deleting Account as a part of cleanup diff --git a/test/metadata/func/snapshot_iso.xml b/test/metadata/func/snapshot_iso.xml index aabc8c113b4..6bddb393e71 100644 --- a/test/metadata/func/snapshot_iso.xml +++ b/test/metadata/func/snapshot_iso.xml @@ -18,38 +18,38 @@ under the License. --> - - + createUser Create a user - + username @@ -87,7 +87,7 @@ under the License. - + registerUserKeys Registering the user @@ -108,8 +108,8 @@ under the License. - - + + registerIso Register iso @@ -176,9 +176,9 @@ under the License. registerdisplay - - - + + + deployVirtualMachine Deploying VM from small diskOffering @@ -195,7 +195,7 @@ under the License. diskofferingid 5 - + templateid privateisoid @@ -224,7 +224,7 @@ under the License. - + listVolumes @@ -245,7 +245,7 @@ under the License. - + listVolumes @@ -265,7 +265,7 @@ under the License. - + sleep.sh @@ -277,7 +277,7 @@ under the License. 200 - + @@ -316,7 +316,7 @@ under the License. - + createTemplate @@ -346,7 +346,7 @@ under the License. - + deployVirtualMachine @@ -364,7 +364,7 @@ under the License. diskofferingid 5 - + templateid smalltemplateid @@ -394,7 +394,7 @@ under the License. - + createVolume Create a volume from smaller disk snapshot @@ -416,7 +416,7 @@ under the License. - + createVolume Create a volume from smaller disk snapshot @@ -456,7 +456,7 @@ under the License. - + attachVolume Attach small volume to a different VM @@ -472,7 +472,7 @@ under the License. - + listConfiguration Getting value of the expunge.delay configuration parameter @@ -489,8 +489,8 @@ under the License. - - + + rebootVirtualMachine @@ -502,7 +502,7 @@ under the License. - + stopVirtualMachine stop medium vm @@ -513,7 +513,7 @@ under the License. - + startVirtualMachine stop medium vm @@ -548,9 +548,9 @@ under the License. - - + + @@ -570,8 +570,8 @@ under the License. - - + + sleep.sh @@ -583,7 +583,7 @@ under the License. - + sleep.sh @@ -595,8 +595,8 @@ under the License. 
- - + + @@ -617,7 +617,7 @@ under the License. - + destroyVirtualMachine @@ -629,8 +629,8 @@ under the License. - - + + deleteUser @@ -642,5 +642,5 @@ under the License. - + diff --git a/test/metadata/func/snapshots.xml b/test/metadata/func/snapshots.xml index 578698ad67e..843b5dfe1c5 100644 --- a/test/metadata/func/snapshots.xml +++ b/test/metadata/func/snapshots.xml @@ -28,7 +28,7 @@ under the License. d.List the Data Disk of the Medium VM 5.Create a Snapshot from the root disk of the Small VM 6.Create a Snapshot from the data disk of the Small VM - 7.Create a Snapshot from the root disk of the Medium VM + 7.Create a Snapshot from the root disk of the Medium VM 8.Create a Snapshot from the data disk of the Medium VM 9.Create a Template from the snapshot taken of the root disk of the Small VM 10.Create a Template from the snapshot taken of the root disk of the Medium VM @@ -38,9 +38,9 @@ under the License. 14.Create a Volume from the snapshot taken of the root disk of the Medium VM 15.Attach the volume created from the snapshot taken of the root disk of the Small VM to the VM created from the template created from the snapshot taken of the root disk of the Small VM 16.Attach the volume created from the snapshot taken of the root disk of the Medium VM to the VM created from the template created from the snapshot taken of the root disk of the Medium VM - 17.Reboot,start,stop,destroy VM created from the template created from the snapshot taken of the root disk of the Small VM - 18.Reboot,start,stop,destroy VM created from the template created from the snapshot taken of the root disk of the Medium VM - 19.Clean Up + 17.Reboot,start,stop,destroy VM created from the template created from the snapshot taken of the root disk of the Small VM + 18.Reboot,start,stop,destroy VM created from the template created from the snapshot taken of the root disk of the Medium VM + 19.Clean Up --> @@ -103,20 +103,20 @@ under the License. 
account accountvar - + id userid - + - +--> registerUserKeys Registering the user @@ -173,8 +173,8 @@ under the License. secretkey - - + + createNetwork [Creating default network] @@ -211,7 +211,7 @@ under the License. - + deployVirtualMachine Deploying vm from small diskOffering @@ -228,7 +228,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -261,7 +261,7 @@ under the License. - + deployVirtualMachine Deploying vm from Medium disk offering @@ -278,7 +278,7 @@ under the License. diskofferingid mediumglobaldiskofferingid - + templateid globaltemplateid @@ -319,7 +319,7 @@ under the License. - + listVolumes @@ -339,7 +339,7 @@ under the License. - + listVolumes @@ -359,7 +359,7 @@ under the License. - + listVolumes @@ -379,7 +379,7 @@ under the License. - + listVolumes @@ -411,7 +411,7 @@ under the License. 200 - + @@ -431,7 +431,7 @@ under the License. - + createSnapshot Creating snapshot of Data disk of small vm @@ -449,7 +449,7 @@ under the License. - + createSnapshot Creating snapshot of ROOT disk of medium vm @@ -467,7 +467,7 @@ under the License. - + createSnapshot Creating snapshot of DATA disk of medium vm @@ -485,7 +485,7 @@ under the License. - + createTemplate @@ -515,7 +515,7 @@ under the License. - + createTemplate Creating template of ROOT snapshot of large vm @@ -544,7 +544,7 @@ under the License. - + deployVirtualMachine Deploy a vm from template created from snapshot of ROOT disk of small vm @@ -561,7 +561,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid smalltemplateid @@ -602,7 +602,7 @@ under the License. - + deployVirtualMachine Deploy a vm from template created from snapshot of ROOT disk of medium vm @@ -619,7 +619,7 @@ under the License. diskofferingid mediumglobaldiskofferinigid - + templateid largetemplateid @@ -703,8 +703,8 @@ under the License. 
largevolumeid - - + + attachVolume Attach small volume to the vm created from small snapshot @@ -720,7 +720,7 @@ under the License. - + attachVolume Attach medium volume to the vm created from medium snapshot @@ -736,8 +736,8 @@ under the License. - - + + rebootVirtualMachine @@ -749,7 +749,7 @@ under the License. - + stopVirtualMachine stop medium vm @@ -760,7 +760,7 @@ under the License. - + startVirtualMachine stop medium vm @@ -784,7 +784,7 @@ under the License. - + destroyVirtualMachine Destroy large vm @@ -796,7 +796,7 @@ under the License. - + @@ -808,9 +808,9 @@ under the License. accountid - + - - - + + diff --git a/test/metadata/func/snapshots_contd.xml b/test/metadata/func/snapshots_contd.xml index d0532cd89d2..c9b277cb696 100644 --- a/test/metadata/func/snapshots_contd.xml +++ b/test/metadata/func/snapshots_contd.xml @@ -18,14 +18,14 @@ under the License. --> - - + createUser Create a user - + username @@ -87,7 +87,7 @@ under the License. - + registerUserKeys Registering the user @@ -107,9 +107,9 @@ under the License. secretkey - - - + + + deployVirtualMachine Deploying VM from small diskOffering @@ -126,7 +126,7 @@ under the License. diskofferingid 5 - + templateid globaltemplateid @@ -155,7 +155,7 @@ under the License. - + listVolumes @@ -176,7 +176,7 @@ under the License. - + listVolumes @@ -196,7 +196,7 @@ under the License. - + sleep.sh @@ -208,7 +208,7 @@ under the License. 200 - + @@ -247,7 +247,7 @@ under the License. - + createTemplate @@ -277,7 +277,7 @@ under the License. - + deployVirtualMachine @@ -295,7 +295,7 @@ under the License. diskofferingid 5 - + templateid smalltemplateid @@ -325,7 +325,7 @@ under the License. - + createVolume Create a volume from smaller disk snapshot @@ -347,7 +347,7 @@ under the License. - + createVolume Create a volume from smaller disk snapshot @@ -369,7 +369,7 @@ under the License. - + createSnapshot Creating snapshot of Detached Volume @@ -406,7 +406,7 @@ under the License. 
- + attachVolume Attach small volume to a different VM @@ -422,9 +422,9 @@ under the License. - - - + + + rebootVirtualMachine @@ -436,7 +436,7 @@ under the License. - + stopVirtualMachine stop medium vm @@ -447,7 +447,7 @@ under the License. - + startVirtualMachine stop medium vm @@ -483,7 +483,7 @@ under the License. - + destroyVirtualMachine @@ -495,9 +495,9 @@ under the License. - - + + @@ -517,8 +517,8 @@ under the License. - - + + sleep.sh @@ -530,7 +530,7 @@ under the License. - + sleep.sh @@ -542,8 +542,8 @@ under the License. - - + + @@ -564,7 +564,7 @@ under the License. - + destroyVirtualMachine @@ -576,7 +576,7 @@ under the License. - + listConfiguration Getting value of the expunge.delay configuration parameter @@ -593,7 +593,7 @@ under the License. - + listConfiguration Getting value of the expunge.delay configuration parameter @@ -609,8 +609,8 @@ under the License. expunge.delay - - + + deleteUser @@ -622,5 +622,5 @@ under the License. - + diff --git a/test/metadata/func/static_nat.xml b/test/metadata/func/static_nat.xml index 144ad9d5ae0..7694affdab8 100644 --- a/test/metadata/func/static_nat.xml +++ b/test/metadata/func/static_nat.xml @@ -18,9 +18,9 @@ under the License. --> - + createUser - Create a regular user + Create a regular user username @@ -57,7 +57,7 @@ under the License. - + registerUserKeys Registering the user @@ -78,7 +78,7 @@ under the License. - + deployVirtualMachine Deploying virtual machine @@ -95,7 +95,7 @@ under the License. diskofferingid 5 - + templateid globaltemplateid @@ -127,9 +127,9 @@ under the License. vmipaddress - - - + + + deployVirtualMachine Deploying virtual machine @@ -146,7 +146,7 @@ under the License. diskofferingid 5 - + templateid globaltemplateid @@ -178,8 +178,8 @@ under the License. vmipaddress1 - - + + sleep.sh @@ -190,8 +190,8 @@ under the License. 60 - - + + listRouters 35 @@ -219,9 +219,9 @@ under the License. 
sourcenatpublicip - - - + + + associateIpAddress true @@ -231,15 +231,15 @@ under the License. globalzoneid - + ipaddress nonsourcenatpublicip - - + + createIpForwardingRule @@ -257,8 +257,8 @@ under the License. - - + + createIpForwardingRule @@ -274,14 +274,14 @@ under the License. vmid - + id ruleid - + ssh.sh @@ -297,8 +297,8 @@ under the License. - - + + deleteIpForwardingRule @@ -311,7 +311,7 @@ under the License. - + listIpForwardingRules true @@ -324,7 +324,7 @@ under the License. - + ssh.sh @@ -341,8 +341,8 @@ under the License. - - + + createIpForwardingRule @@ -358,14 +358,14 @@ under the License. vmid - + id ruleid - + listIpForwardingRules true @@ -378,7 +378,7 @@ under the License. - + listIpForwardingRules true @@ -391,8 +391,8 @@ under the License. - - + + listIpForwardingRules true @@ -405,7 +405,7 @@ under the License. - + disassociateIpAddress @@ -417,7 +417,7 @@ under the License. - + listIpForwardingRules true @@ -425,7 +425,7 @@ under the License. false - + associateIpAddress @@ -436,15 +436,15 @@ under the License. globalzoneid - + ipaddress nonsourcenatpublicip - - + + createIpForwardingRule true @@ -459,14 +459,14 @@ under the License. vmid - + id ruleid - + createIpForwardingRule true @@ -483,8 +483,8 @@ under the License. - - + + destroyVirtualMachine @@ -496,7 +496,7 @@ under the License. - + listConfigurations Getting expunge interval value @@ -513,7 +513,7 @@ under the License. - + sleep.sh @@ -524,15 +524,15 @@ under the License. expunge.interval - - + + listIpForwardingRules true true Test case 1140: static nat rules should be removed once corresponding vm is expunged - + @@ -544,14 +544,14 @@ under the License. globalzoneid - + ipaddress nonsourcenatpublicip1 - + associateIpAddress true @@ -561,14 +561,14 @@ under the License. globalzoneid - + ipaddress nonsourcenatpublicip2 - + createIpForwardingRule true @@ -584,7 +584,7 @@ under the License. - + createIpForwardingRule true @@ -600,8 +600,8 @@ under the License. 
- - + + ssh.sh @@ -617,8 +617,8 @@ under the License. - - + + ssh.sh @@ -634,8 +634,8 @@ under the License. - - + + rebootRouter @@ -647,7 +647,7 @@ under the License. - + ssh.sh @@ -675,8 +675,8 @@ under the License. routerid - - + + startRouter Starting router... @@ -686,8 +686,8 @@ under the License. routerid - - + + ssh.sh @@ -703,7 +703,7 @@ under the License. - + @@ -722,7 +722,7 @@ under the License. diskofferingid 5 - + templateid globaltemplateid @@ -754,8 +754,8 @@ under the License. vmipaddress1 - - + + sleep.sh @@ -766,8 +766,8 @@ under the License. 60 - - + + associateIpAddress true @@ -777,14 +777,14 @@ under the License. globalzoneid - + ipaddress nonsourcenatpublicip - - + + createPortForwardingRule true @@ -815,8 +815,8 @@ under the License. vmid - - + + ssh.sh @@ -832,7 +832,7 @@ under the License. - + createIpForwardingRule true @@ -847,14 +847,14 @@ under the License. vmid1 - + id ruleid - + ssh.sh @@ -870,7 +870,7 @@ under the License. - + deleteIpForwardingRule true @@ -882,7 +882,7 @@ under the License. - + ssh.sh @@ -898,9 +898,9 @@ under the License. - - - + + + deleteUser Deleting the user @@ -910,7 +910,7 @@ under the License. userid - + diff --git a/test/metadata/func/templatedwnldstress.xml b/test/metadata/func/templatedwnldstress.xml index 19d12a2d9ef..0edb82eb1fe 100644 --- a/test/metadata/func/templatedwnldstress.xml +++ b/test/metadata/func/templatedwnldstress.xml @@ -18,18 +18,18 @@ under the License. --> - @@ -76,7 +76,7 @@ under the License. - + createNetwork @@ -114,7 +114,7 @@ under the License. - + deployVirtualMachine [Deploy a Virtual Machine under the Account] @@ -159,7 +159,7 @@ under the License. - + listVolumes [List the ROOT Volume of the VM] @@ -180,7 +180,7 @@ under the License. - + stopVirtualMachine [Stop virtual machine] @@ -191,7 +191,7 @@ under the License. - + createTemplate [Create template from root volume] @@ -219,7 +219,7 @@ under the License. 
- + deployVirtualMachine [Deploying virtual machine from the Private Template] @@ -235,7 +235,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid privatetemplateid @@ -272,7 +272,7 @@ under the License. - + listHosts List host where vm is running @@ -289,7 +289,7 @@ under the License. - + select install_path from template_spool_ref true @@ -307,7 +307,7 @@ under the License. - + listvdi.sh @@ -323,7 +323,7 @@ under the License. - + destroyVirtualMachine Destroy virtual machine created from the private template @@ -333,9 +333,9 @@ under the License. vmid1 - - - + + + listConfigurations Getting expunge interval value @@ -352,7 +352,7 @@ under the License. - + updateConfiguration Updating storage.cleanup.interval @@ -367,7 +367,7 @@ under the License. - + listConfigurations Getting expunge interval value @@ -384,7 +384,7 @@ under the License. - + ms.sh @@ -411,9 +411,9 @@ under the License. expunge.interval - - - + + + sleep.sh @@ -424,8 +424,8 @@ under the License. expunge.interval - - + + sleep.sh @@ -436,8 +436,8 @@ under the License. storage.cleanup.interval - - + + sleep.sh @@ -448,10 +448,10 @@ under the License. storage.cleanup.interval - - + + - + listvdi.sh true @@ -470,7 +470,7 @@ under the License. - + deployVirtualMachine Deploying virtual machine from the private template @@ -486,7 +486,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid privatetemplateid @@ -523,7 +523,7 @@ under the License. - + listHosts List host where vm is running @@ -540,7 +540,7 @@ under the License. - + select install_path from template_spool_ref true @@ -558,7 +558,7 @@ under the License. - + listvdi.sh @@ -574,7 +574,7 @@ under the License. - + destroyVirtualMachine Destroy virtual machine created from the private template @@ -584,9 +584,9 @@ under the License. vmid1 - - - + + + listConfigurations Getting expunge interval value @@ -603,7 +603,7 @@ under the License. - + sleep.sh @@ -614,9 +614,9 @@ under the License. 
expunge.interval - - - + + + sleep.sh @@ -627,8 +627,8 @@ under the License. expunge.interval - - + + sleep.sh @@ -639,8 +639,8 @@ under the License. storage.cleanup.interval - - + + sleep.sh @@ -651,10 +651,10 @@ under the License. storage.cleanup.interval - - + + - + listvdi.sh true @@ -673,8 +673,8 @@ under the License. - - + + startVirtualMachine [Start virtual machine] @@ -691,7 +691,7 @@ under the License. - + rebootVirtualMachine [Reboot Virtual Machine] @@ -702,7 +702,7 @@ under the License. - + destroyVirtualMachine [Destroy virtual machine] @@ -713,7 +713,7 @@ under the License. - + listVirtualMachines [Verify that vm was destroyed] @@ -730,7 +730,7 @@ under the License. - + deleteTemplate Delete private template @@ -741,7 +741,7 @@ under the License. - + deleteAccount [Delete account] @@ -751,7 +751,7 @@ under the License. accountid - - - + + + diff --git a/test/metadata/func/userapi.xml b/test/metadata/func/userapi.xml index de03b9794da..67246cbb0bc 100644 --- a/test/metadata/func/userapi.xml +++ b/test/metadata/func/userapi.xml @@ -64,7 +64,7 @@ under the License. name accountname - + @@ -77,7 +77,7 @@ under the License. account accountname - + id @@ -86,10 +86,10 @@ under the License. username usernameparam - + - + createUser @@ -127,11 +127,11 @@ under the License. domainid 1 - + - - + + +--> @@ -243,7 +243,7 @@ under the License. password password - + +--> createUser @@ -296,7 +296,7 @@ under the License. 0 - username + username password @@ -317,7 +317,7 @@ under the License. domainid 1 - + @@ -326,7 +326,7 @@ under the License. seconduserid - + updateUser 593 @@ -356,7 +356,7 @@ under the License. - + deleteUser @@ -367,7 +367,7 @@ under the License. - + deleteUser 58 @@ -379,8 +379,8 @@ under the License. - - + + listUsers 59 @@ -392,7 +392,7 @@ under the License. - + listUsers @@ -405,7 +405,7 @@ under the License. - + updateUser @@ -421,7 +421,7 @@ under the License. - + updateUser @@ -437,7 +437,7 @@ under the License. 
- + createUser @@ -473,7 +473,7 @@ under the License. - + createUser @@ -512,8 +512,8 @@ under the License. - - + + createUser @@ -541,8 +541,8 @@ under the License. - - + + createUser 47 @@ -569,7 +569,7 @@ under the License. - + createUser 47 @@ -596,7 +596,7 @@ under the License. - + createUser 47 @@ -623,8 +623,8 @@ under the License. - - + + createDomain @@ -643,8 +643,8 @@ under the License. newdomainid - - + + createAccount [Create the Account and a User in the New Domain] @@ -690,7 +690,7 @@ under the License. name accountname1 - + @@ -703,7 +703,7 @@ under the License. account accountname1 - + id @@ -712,11 +712,11 @@ under the License. username usernameparam - + - - + +--> createUser 51 @@ -832,8 +832,8 @@ under the License. - - + + createUser @@ -871,7 +871,7 @@ under the License. - + createUser @@ -909,7 +909,7 @@ under the License. - + createDomain @@ -928,7 +928,7 @@ under the License. - + createDomain 64 @@ -940,9 +940,9 @@ under the License. - - + + deleteDomain @@ -952,7 +952,7 @@ under the License. - + listDomains 65 @@ -964,8 +964,8 @@ under the License. - - + + +--> deleteDomain 66 @@ -1032,7 +1032,7 @@ under the License. - + listDomains @@ -1045,8 +1045,8 @@ under the License. - - + + createUser @@ -1094,7 +1094,7 @@ under the License. - + deployVirtualMachine @@ -1109,7 +1109,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -1136,7 +1136,7 @@ under the License. - + deleteUser @@ -1146,7 +1146,7 @@ under the License. - + listEvents 601 @@ -1157,7 +1157,7 @@ under the License. - + @@ -1206,7 +1206,7 @@ under the License. - + registerUserKeys @@ -1225,8 +1225,8 @@ under the License. secretkey - - + + listVirtualMachines true @@ -1239,8 +1239,8 @@ under the License. - - + + deleteDomain @@ -1254,8 +1254,8 @@ under the License. 
true - - + + +--> deleteAccount id - accountid - - - - - - deleteAccount - - - id - accountid1 + accountid - + + + deleteAccount + + + id + accountid1 + + + + diff --git a/test/metadata/func/vmapi.xml b/test/metadata/func/vmapi.xml index 8c1263c7767..fbda66fe5d7 100644 --- a/test/metadata/func/vmapi.xml +++ b/test/metadata/func/vmapi.xml @@ -16,11 +16,11 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> - + - @@ -80,7 +80,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -121,7 +121,7 @@ under the License. - + destroyVirtualMachine [Destroy Virtual Machine] @@ -132,7 +132,7 @@ under the License. - + listVirtualMachines 72 [List the Destroyed Virutal Machine] @@ -147,7 +147,7 @@ under the License. - + startVirtualMachine @@ -160,7 +160,7 @@ under the License. - + listVirtualMachines @@ -176,8 +176,8 @@ under the License. - - + + createAccount @@ -223,7 +223,7 @@ under the License. name accountname - + @@ -237,7 +237,7 @@ under the License. accountname - + id @@ -251,7 +251,7 @@ under the License. - +--> createNetwork @@ -328,7 +328,7 @@ under the License. - + deployVirtualMachine [Deploy Virtual Machine in the new Account] @@ -344,7 +344,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -385,7 +385,7 @@ under the License. - + deleteUser [Delete User] @@ -395,7 +395,7 @@ under the License. userid - + deleteAccount @@ -406,8 +406,8 @@ under the License. accountid - - + + listVirtualMachines 74 [Check that no VMs present under the Account(List VM for a Removed User exercise END)] @@ -423,7 +423,7 @@ under the License. - + updateVirtualMachine @@ -439,8 +439,8 @@ under the License. true - - + + updateVirtualMachine @@ -456,10 +456,10 @@ under the License. true - - + - + + recoverVirtualMachine [Recover the Destroyed VM] @@ -470,7 +470,7 @@ under the License. 
- + resetPasswordForVirtualMachine 79-1 [Verify that one can reset password for VM in stopped state] @@ -482,7 +482,7 @@ under the License. - + updateVirtualMachine @@ -497,10 +497,10 @@ under the License. true - + - + startVirtualMachine [Start Virtual Machine (reset password for VM in running state exercise BEGIN)] @@ -511,7 +511,7 @@ under the License. - + resetPasswordForVirtualMachine 79-2 [Verify that one can reset password for VM in running state] @@ -522,7 +522,7 @@ under the License. - + @@ -538,9 +538,9 @@ under the License. false - - - + + + destroyVirtualMachine @@ -552,7 +552,7 @@ under the License. - + resetPasswordForVirtualMachine 80 [Verify that one can't reset password for VM in destroyed state] @@ -564,8 +564,8 @@ under the License. - - + + rebootVirtualMachine 89 [Verify that one can't reboot destroyed VM] @@ -578,7 +578,7 @@ under the License. - + deployVirtualMachine @@ -595,7 +595,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -636,7 +636,7 @@ under the License. - + resetPasswordForVirtualMachine 81 [Check whether password can be reset on virtual machine made from password disabled VM] @@ -648,7 +648,7 @@ under the License. - + changeServiceForVirtualMachine @@ -677,7 +677,7 @@ under the License. - + changeServiceForVirtualMachine 83 [Try to change service offering to the one that VM already has -ve case] @@ -693,8 +693,8 @@ under the License. - - + + rebootVirtualMachine @@ -707,7 +707,7 @@ under the License. - + @@ -721,7 +721,7 @@ under the License. - + startVirtualMachine [Start virtual machine] @@ -732,7 +732,7 @@ under the License. - + recoverVirtualMachine 813 [Try to recover running virtual machine] @@ -763,7 +763,7 @@ under the License. diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -789,8 +789,8 @@ under the License. globalnetworkid - - + + deployVirtualMachine @@ -808,7 +808,7 @@ under the License. 
diskofferingid globaldiskofferingid - + templateid globaltemplateid @@ -834,8 +834,8 @@ under the License. networkid - - + + destroyVirtualMachine @@ -848,7 +848,7 @@ under the License. - + rebootVirtualMachine @@ -861,7 +861,7 @@ under the License. - + startVirtualMachine @@ -886,6 +886,6 @@ under the License. - - - + + + diff --git a/test/metadata/func/vmsync.xml b/test/metadata/func/vmsync.xml index 0e7a69749f9..c0b34c9c37a 100644 --- a/test/metadata/func/vmsync.xml +++ b/test/metadata/func/vmsync.xml @@ -23,12 +23,12 @@ under the License. deployVirtualMachine deploy HA enabled VM test case - + diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -40,7 +40,7 @@ under the License. zoneid globalzoneid - + displayname HAenabledVM @@ -60,7 +60,7 @@ under the License. networkids globalnetworkid - + @@ -82,12 +82,12 @@ under the License. deployVirtualMachine deploy HA disabled VM test case - + diskofferingid globaldiskofferingid - + serviceofferingid globalserviceofferingid @@ -99,7 +99,7 @@ under the License. zoneid globalzoneid - + displayname HAdisabledVM @@ -119,7 +119,7 @@ under the License. networkids globalnetworkid - + @@ -174,7 +174,7 @@ under the License. haenable false - + @@ -196,7 +196,7 @@ under the License. - + @@ -233,8 +233,8 @@ under the License. instance_name_db - - + + select instance_name from vm_instance true @@ -251,7 +251,7 @@ under the License. instance_name_en - + @@ -259,13 +259,13 @@ under the License. killvm.sh Kill the HA enabled VM - + n instance_name_en - + h ipaddress_en @@ -278,13 +278,13 @@ under the License. killvm.sh Kill the HA disabled VM - + n instance_name_db - + h ipaddress_db @@ -303,7 +303,7 @@ under the License. 300 - + @@ -321,10 +321,10 @@ under the License. state Stopped - + - - + + listVirtualMachines List virtual machine with ha enabled @@ -393,7 +393,7 @@ under the License. 
- + destroyVirtualMachine Destroy ha enabled vm as a part of cleanup diff --git a/test/scripts/usercloud.properties b/test/scripts/usercloud.properties index 514e98058a6..e2b647e5de2 100644 --- a/test/scripts/usercloud.properties +++ b/test/scripts/usercloud.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/test/src-not-used/main/java/com/cloud/sample/UserCloudAPIExecutor.java b/test/src-not-used/main/java/com/cloud/sample/UserCloudAPIExecutor.java deleted file mode 100644 index 6baadb8b035..00000000000 --- a/test/src-not-used/main/java/com/cloud/sample/UserCloudAPIExecutor.java +++ /dev/null @@ -1,188 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.sample; - -import java.io.FileInputStream; -import java.net.URLEncoder; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Properties; -import java.util.StringTokenizer; - -import javax.crypto.Mac; -import javax.crypto.spec.SecretKeySpec; - -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; - -/** - * - * - * - * - * - * - * - * - * - */ - -/** - * Sample CloudStack Management User API Executor. - * - * Prerequisites: - Edit usercloud.properties to include your host, apiUrl, apiKey, and secretKey - Use ./executeUserAPI.sh to - * execute this test class - * - * - */ -public class UserCloudAPIExecutor { - public static void main(String[] args) { - // Host - String host = null; - - // Fully qualified URL with http(s)://host:port - String apiUrl = null; - - // ApiKey and secretKey as given by your CloudStack vendor - String apiKey = null; - String secretKey = null; - - try { - Properties prop = new Properties(); - prop.load(new FileInputStream("usercloud.properties")); - - // host - host = prop.getProperty("host"); - if (host == null) { - System.out.println("Please specify a valid host in the format of http(s)://:/client/api in your usercloud.properties file."); - } - - // apiUrl - apiUrl = prop.getProperty("apiUrl"); - if (apiUrl == null) { - System.out.println("Please specify a valid API URL in the format of command=¶m1=¶m2=... 
in your usercloud.properties file."); - } - - // apiKey - apiKey = prop.getProperty("apiKey"); - if (apiKey == null) { - System.out.println("Please specify your API Key as provided by your CloudStack vendor in your usercloud.properties file."); - } - - // secretKey - secretKey = prop.getProperty("secretKey"); - if (secretKey == null) { - System.out.println("Please specify your secret Key as provided by your CloudStack vendor in your usercloud.properties file."); - } - - if (apiUrl == null || apiKey == null || secretKey == null) { - return; - } - - System.out.println("Constructing API call to host = '" + host + "' with API command = '" + apiUrl + "' using apiKey = '" + apiKey + "' and secretKey = '" + - secretKey + "'"); - - // Step 1: Make sure your APIKey is URL encoded - String encodedApiKey = URLEncoder.encode(apiKey, "UTF-8"); - - // Step 2: URL encode each parameter value, then sort the parameters and apiKey in - // alphabetical order, and then toLowerCase all the parameters, parameter values and apiKey. 
- // Please note that if any parameters with a '&' as a value will cause this test client to fail since we are using - // '&' to delimit - // the string - List sortedParams = new ArrayList(); - sortedParams.add("apikey=" + encodedApiKey.toLowerCase()); - StringTokenizer st = new StringTokenizer(apiUrl, "&"); - String url = null; - boolean first = true; - while (st.hasMoreTokens()) { - String paramValue = st.nextToken(); - String param = paramValue.substring(0, paramValue.indexOf("=")); - String value = URLEncoder.encode(paramValue.substring(paramValue.indexOf("=") + 1, paramValue.length()), "UTF-8"); - if (first) { - url = param + "=" + value; - first = false; - } else { - url = url + "&" + param + "=" + value; - } - sortedParams.add(param.toLowerCase() + "=" + value.toLowerCase()); - } - Collections.sort(sortedParams); - - System.out.println("Sorted Parameters: " + sortedParams); - - // Step 3: Construct the sorted URL and sign and URL encode the sorted URL with your secret key - String sortedUrl = null; - first = true; - for (String param : sortedParams) { - if (first) { - sortedUrl = param; - first = false; - } else { - sortedUrl = sortedUrl + "&" + param; - } - } - System.out.println("sorted URL : " + sortedUrl); - String encodedSignature = signRequest(sortedUrl, secretKey); - - // Step 4: Construct the final URL we want to send to the CloudStack Management Server - // Final result should look like: - // http(s)://://client/api?&apiKey=&signature= - String finalUrl = host + "?" + url + "&apiKey=" + apiKey + "&signature=" + encodedSignature; - System.out.println("final URL : " + finalUrl); - - // Step 5: Perform a HTTP GET on this URL to execute the command - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(finalUrl); - int responseCode = client.executeMethod(method); - if (responseCode == 200) { - // SUCCESS! - System.out.println("Successfully executed command"); - } else { - // FAILED! 
- System.out.println("Unable to execute command with response code: " + responseCode); - } - - } catch (Throwable t) { - System.out.println(t); - } - } - - /** - * 1. Signs a string with a secret key using SHA-1 2. Base64 encode the result 3. URL encode the final result - * - * @param request - * @param key - * @return - */ - public static String signRequest(String request, String key) { - try { - Mac mac = Mac.getInstance("HmacSHA1"); - SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), "HmacSHA1"); - mac.init(keySpec); - mac.update(request.getBytes()); - byte[] encryptedBytes = mac.doFinal(); - return URLEncoder.encode(Base64.encodeBase64String(encryptedBytes), "UTF-8"); - } catch (Exception ex) { - System.out.println(ex); - } - return null; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/longrun/BuildGuestNetwork.java b/test/src-not-used/main/java/com/cloud/test/longrun/BuildGuestNetwork.java deleted file mode 100644 index 7a56725681b..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/longrun/BuildGuestNetwork.java +++ /dev/null @@ -1,124 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.longrun; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Random; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -public class BuildGuestNetwork { - - protected Logger logger = LogManager.getLogger(getClass()); - private static final int ApiPort = 8096; - private static final int DeveloperPort = 8080; - private static final String ApiUrl = "/client/api"; - private static int numVM = 1; - private static long zoneId = -1L; - private static long templateId = 3; - private static long serviceOfferingId = 1; - - public static void main(String[] args) { - - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - String host = "http://localhost"; - int numThreads = 1; - - while (iter.hasNext()) { - String arg = iter.next(); - if (arg.equals("-h")) { - host = "http://" + iter.next(); - } - if (arg.equals("-t")) { - numThreads = Integer.parseInt(iter.next()); - } - if (arg.equals("-n")) { - numVM = Integer.parseInt(iter.next()); - } - if (arg.equals("-z")) { - zoneId = Integer.parseInt(iter.next()); - } - - if (arg.equals("-e")) { - templateId = Integer.parseInt(iter.next()); - } - - if (arg.equals("-s")) { - serviceOfferingId = Integer.parseInt(iter.next()); - } - } - - final String server = host + ":" + ApiPort + "/"; - final String developerServer = host + ":" + DeveloperPort + ApiUrl; - logger.info("Starting test in " + numThreads + " thread(s). 
Each thread is launching " + numVM + " VMs"); - - for (int i = 0; i < numThreads; i++) { - new Thread(new Runnable() { - @Override - public void run() { - try { - - String username = null; - String singlePrivateIp = null; - Random ran = new Random(); - username = Math.abs(ran.nextInt()) + "-user"; - - //Create User - User myUser = new User(username, username, server, developerServer); - try { - myUser.launchUser(); - myUser.registerUser(); - } catch (Exception e) { - logger.warn("Error code: ", e); - } - - if (myUser.getUserId() != null) { - logger.info("User " + myUser.getUserName() + " was created successfully, starting VM creation"); - //create VMs for the user - for (int i = 0; i < numVM; i++) { - //Create a new VM, add it to the list of user's VMs - VirtualMachine myVM = new VirtualMachine(myUser.getUserId()); - myVM.deployVM(zoneId, serviceOfferingId, templateId, myUser.getDeveloperServer(), myUser.getApiKey(), myUser.getSecretKey()); - myUser.getVirtualMachines().add(myVM); - singlePrivateIp = myVM.getPrivateIp(); - - if (singlePrivateIp != null) { - logger.info("VM with private Ip " + singlePrivateIp + " was successfully created"); - } else { - logger.info("Problems with VM creation for a user" + myUser.getUserName()); - logger.info("Deployment failed"); - break; - } - } - - logger.info("Deployment done..." + numVM + " VMs were created."); - } - - } catch (Exception e) { - logger.error(e); - } - } - }).start(); - - } - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/longrun/GuestNetwork.java b/test/src-not-used/main/java/com/cloud/test/longrun/GuestNetwork.java deleted file mode 100644 index 7e90d71020c..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/longrun/GuestNetwork.java +++ /dev/null @@ -1,108 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.longrun; - -import java.util.ArrayList; -import java.util.Random; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.log4j.NDC; - -import com.trilead.ssh2.Connection; -import com.trilead.ssh2.Session; - -public class GuestNetwork implements Runnable { - protected Logger logger = LogManager.getLogger(getClass()); - - private String publicIp; - private ArrayList virtualMachines; - private int retryNum; - - public GuestNetwork(String publicIp, int retryNum) { - this.publicIp = publicIp; - this.retryNum = retryNum; - } - - public ArrayList getVirtualMachines() { - return virtualMachines; - } - - public void setVirtualMachines(ArrayList virtualMachines) { - this.virtualMachines = virtualMachines; - } - - @Override - public void run() { - NDC.push("Following thread has started" + Thread.currentThread().getName()); - int retry = 0; - - //Start copying files between machines in the network - logger.info("The size of the array is " + this.virtualMachines.size()); - while (true) { - try { - if (retry > 0) { - logger.info("Retry attempt : " + retry + " ...sleeping 120 seconds before next attempt"); - Thread.sleep(120000); - } - for (VirtualMachine vm : this.virtualMachines) { - - logger.info("Attempting to SSH into linux host " + this.publicIp + " with retry attempt: " + retry); - Connection conn = new 
Connection(this.publicIp); - conn.connect(null, 600000, 600000); - - logger.info("SSHed successfully into linux host " + this.publicIp); - - boolean isAuthenticated = conn.authenticateWithPassword("root", "password"); - - if (isAuthenticated == false) { - logger.info("Authentication failed"); - } - //execute copy command - Session sess = conn.openSession(); - String fileName; - Random ran = new Random(); - fileName = Math.abs(ran.nextInt()) + "-file"; - String copyCommand = new String("./scpScript " + vm.getPrivateIp() + " " + fileName); - logger.info("Executing " + copyCommand); - sess.execCommand(copyCommand); - Thread.sleep(120000); - sess.close(); - - //execute wget command - sess = conn.openSession(); - String downloadCommand = - new String("wget http://172.16.0.220/scripts/checkDiskSpace.sh; chmod +x *sh; ./checkDiskSpace.sh; rm -rf checkDiskSpace.sh"); - logger.info("Executing " + downloadCommand); - sess.execCommand(downloadCommand); - Thread.sleep(120000); - sess.close(); - - //close the connection - conn.close(); - } - } catch (Exception ex) { - logger.error(ex); - retry++; - if (retry == retryNum) { - logger.info("Performance Guest Network test failed with error " + ex.getMessage()); - } - } - } - - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/longrun/PerformanceWithAPI.java b/test/src-not-used/main/java/com/cloud/test/longrun/PerformanceWithAPI.java deleted file mode 100644 index 821b5012bb0..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/longrun/PerformanceWithAPI.java +++ /dev/null @@ -1,191 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.longrun; - -import java.io.IOException; -import java.io.InputStream; -import java.net.URLEncoder; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import com.cloud.test.stress.TestClientWithAPI; - -public class PerformanceWithAPI { - - protected Logger logger = LogManager.getLogger(getClass()); - private static final int Retry = 10; - private static final int ApiPort = 8096; - private static int s_numVM = 2; - private static final long ZoneId = -1L; - private static final long TemplateId = 3; - private static final long ServiceOfferingId = 1; - private static final String ApiUrl = "/client/api"; - private static final int DeveloperPort = 8080; - - public static void main(String[] args) { - - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - String host = "http://localhost"; - int numThreads = 1; - - while (iter.hasNext()) { - String arg = iter.next(); - if (arg.equals("-h")) { - host = "http://" + iter.next(); - } - if (arg.equals("-t")) { - numThreads = Integer.parseInt(iter.next()); - } - if (arg.equals("-n")) { - s_numVM = Integer.parseInt(iter.next()); - } - } - - final String server = host + ":" + ApiPort + "/"; - final String developerServer = host + ":" + DeveloperPort + 
ApiUrl; - - logger.info("Starting test in " + numThreads + " thread(s). Each thread is launching " + s_numVM + " VMs"); - - for (int i = 0; i < numThreads; i++) { - new Thread(new Runnable() { - @Override - public void run() { - try { - - String username = null; - String singlePrivateIp = null; - String singlePublicIp = null; - Random ran = new Random(); - username = Math.abs(ran.nextInt()) + "-user"; - - //Create User - User myUser = new User(username, username, server, developerServer); - try { - myUser.launchUser(); - myUser.registerUser(); - } catch (Exception e) { - logger.warn("Error code: ", e); - } - - if (myUser.getUserId() != null) { - logger.info("User " + myUser.getUserName() + " was created successfully, starting VM creation"); - //create VMs for the user - for (int i = 0; i < s_numVM; i++) { - //Create a new VM, add it to the list of user's VMs - VirtualMachine myVM = new VirtualMachine(myUser.getUserId()); - myVM.deployVM(ZoneId, ServiceOfferingId, TemplateId, myUser.getDeveloperServer(), myUser.getApiKey(), myUser.getSecretKey()); - myUser.getVirtualMachines().add(myVM); - singlePrivateIp = myVM.getPrivateIp(); - - if (singlePrivateIp != null) { - logger.info("VM with private Ip " + singlePrivateIp + " was successfully created"); - } else { - logger.info("Problems with VM creation for a user" + myUser.getUserName()); - break; - } - - //get public IP address for the User - myUser.retrievePublicIp(ZoneId); - singlePublicIp = myUser.getPublicIp().get(myUser.getPublicIp().size() - 1); - if (singlePublicIp != null) { - logger.info("Successfully got public Ip " + singlePublicIp + " for user " + myUser.getUserName()); - } else { - logger.info("Problems with getting public Ip address for user" + myUser.getUserName()); - break; - } - - //create ForwardProxy rules for user's VMs - int responseCode = CreateForwardingRule(myUser, singlePrivateIp, singlePublicIp, "22", "22"); - if (responseCode == 500) - break; - } - - logger.info("Deployment successful..." 
+ s_numVM + " VMs were created. Waiting for 5 min before performance test"); - Thread.sleep(300000L); // Wait - - //Start performance test for the user - logger.info("Starting performance test for Guest network that has " + myUser.getPublicIp().size() + " public IP addresses"); - for (int j = 0; j < myUser.getPublicIp().size(); j++) { - logger.info("Starting test for user which has " + myUser.getVirtualMachines().size() + " vms. Public IP for the user is " + - myUser.getPublicIp().get(j) + " , number of retries is " + Retry + " , private IP address of the machine is" + - myUser.getVirtualMachines().get(j).getPrivateIp()); - GuestNetwork myNetwork = new GuestNetwork(myUser.getPublicIp().get(j), Retry); - myNetwork.setVirtualMachines(myUser.getVirtualMachines()); - new Thread(myNetwork).start(); - } - - } - } catch (Exception e) { - logger.error(e); - } - } - }).start(); - - } - } - - private static int CreateForwardingRule(User myUser, String privateIp, String publicIp, String publicPort, String privatePort) throws IOException { - String encodedPrivateIp = URLEncoder.encode("" + privateIp, "UTF-8"); - String encodedPublicIp = URLEncoder.encode("" + publicIp, "UTF-8"); - String encodedPrivatePort = URLEncoder.encode("" + privatePort, "UTF-8"); - String encodedPublicPort = URLEncoder.encode("" + publicPort, "UTF-8"); - String encodedApiKey = URLEncoder.encode(myUser.getApiKey(), "UTF-8"); - int responseCode = 500; - - String requestToSign = - "apiKey=" + encodedApiKey + "&command=createOrUpdateIpForwardingRule&privateIp=" + encodedPrivateIp + "&privatePort=" + encodedPrivatePort + - "&protocol=tcp&publicIp=" + encodedPublicIp + "&publicPort=" + encodedPublicPort; - - requestToSign = requestToSign.toLowerCase(); - logger.info("Request to sign is " + requestToSign); - - String signature = TestClientWithAPI.signRequest(requestToSign, myUser.getSecretKey()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - String url = - myUser.getDeveloperServer() + 
"?command=createOrUpdateIpForwardingRule" + "&publicIp=" + encodedPublicIp + "&publicPort=" + encodedPublicPort + - "&privateIp=" + encodedPrivateIp + "&privatePort=" + encodedPrivatePort + "&protocol=tcp&apiKey=" + encodedApiKey + "&signature=" + encodedSignature; - - logger.info("Trying to create IP forwarding rule: " + url); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("create ip forwarding rule response code: " + responseCode); - if (responseCode == 200) { - logger.info("The rule is created successfully"); - } else if (responseCode == 500) { - InputStream is = method.getResponseBodyAsStream(); - Map errorInfo = TestClientWithAPI.getSingleValueFromXML(is, new String[] {"errorCode", "description"}); - logger.error("create ip forwarding rule (linux) test failed with errorCode: " + errorInfo.get("errorCode") + " and description: " + - errorInfo.get("description")); - } else { - logger.error("internal error processing request: " + method.getStatusText()); - } - return responseCode; - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/longrun/User.java b/test/src-not-used/main/java/com/cloud/test/longrun/User.java deleted file mode 100644 index 56880cb9df7..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/longrun/User.java +++ /dev/null @@ -1,203 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.longrun; - -import java.io.IOException; -import java.io.InputStream; -import java.net.URLEncoder; -import java.util.ArrayList; -import java.util.Map; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpException; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import com.cloud.test.stress.TestClientWithAPI; - -public class User { - protected Logger logger = LogManager.getLogger(getClass()); - - private ArrayList virtualMachines; - private ArrayList publicIp; - private String server; - private String developerServer; - private String userName; - private String userId; - private String apiKey; - private String secretKey; - private String password; - private String encryptedPassword; - - public User(String userName, String password, String server, String developerServer) { - this.server = server; - this.developerServer = developerServer; - this.userName = userName; - this.password = password; - this.virtualMachines = new ArrayList(); - this.publicIp = new ArrayList(); - } - - public ArrayList getVirtualMachines() { - return virtualMachines; - } - - public void setVirtualMachines(ArrayList virtualMachines) { - this.virtualMachines = virtualMachines; - } - - public String getUserId() { - return userId; - } - - public void setUserId(String userId) { - this.userId = userId; - } - - public ArrayList getPublicIp() { - return publicIp; - } - - 
public void setPublicIp(ArrayList publicIp) { - this.publicIp = publicIp; - } - - public String getServer() { - return server; - } - - public void setServer(String server) { - this.server = server; - } - - public String getUserName() { - return userName; - } - - public void setUserName(String userName) { - this.userName = userName; - } - - public String getApiKey() { - return apiKey; - } - - public void setApiKey(String apiKey) { - this.apiKey = apiKey; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public String getSecretKey() { - return secretKey; - } - - public void setSecretKey(String secretKey) { - this.secretKey = secretKey; - } - - public String getDeveloperServer() { - return developerServer; - } - - public void setDeveloperServer(String developerServer) { - this.developerServer = developerServer; - } - - public void launchUser() throws IOException { - String encodedUsername = URLEncoder.encode(this.getUserName(), "UTF-8"); - this.encryptedPassword = TestClientWithAPI.createMD5Password(this.getPassword()); - String encodedPassword = URLEncoder.encode(this.encryptedPassword, "UTF-8"); - String url = - this.server + "?command=createUser&username=" + encodedUsername + "&password=" + encodedPassword + - "&firstname=Test&lastname=Test&email=alena@vmops.com&domainId=1"; - String userIdStr = null; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userIdValues = TestClientWithAPI.getSingleValueFromXML(is, new String[] {"id"}); - userIdStr = userIdValues.get("id"); - if ((userIdStr != null) && (Long.parseLong(userIdStr) != -1)) { - this.setUserId(userIdStr); - } - } - } - - public void retrievePublicIp(long zoneId) throws IOException { - - String encodedApiKey = URLEncoder.encode(this.apiKey, 
"UTF-8"); - String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); - String requestToSign = "apiKey=" + encodedApiKey + "&command=associateIpAddress" + "&zoneId=" + encodedZoneId; - requestToSign = requestToSign.toLowerCase(); - String signature = TestClientWithAPI.signRequest(requestToSign, this.secretKey); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - String url = this.developerServer + "?command=associateIpAddress" + "&apiKey=" + encodedApiKey + "&zoneId=" + encodedZoneId + "&signature=" + encodedSignature; - - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map values = TestClientWithAPI.getSingleValueFromXML(is, new String[] {"ipaddress"}); - this.getPublicIp().add(values.get("ipaddress")); - logger.info("Ip address is " + values.get("ipaddress")); - } else if (responseCode == 500) { - InputStream is = method.getResponseBodyAsStream(); - Map errorInfo = TestClientWithAPI.getSingleValueFromXML(is, new String[] {"errorcode", "description"}); - logger.error("associate ip test failed with errorCode: " + errorInfo.get("errorCode") + " and description: " + errorInfo.get("description")); - } else { - logger.error("internal error processing request: " + method.getStatusText()); - } - - } - - public void registerUser() throws HttpException, IOException { - - String encodedUsername = URLEncoder.encode(this.userName, "UTF-8"); - String encodedPassword = URLEncoder.encode(this.password, "UTF-8"); - String url = server + "?command=register&username=" + encodedUsername + "&domainid=1"; - logger.info("registering: " + this.userName + " with url " + url); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map 
requestKeyValues = TestClientWithAPI.getSingleValueFromXML(is, new String[] {"apikey", "secretkey"}); - this.setApiKey(requestKeyValues.get("apikey")); - this.setSecretKey(requestKeyValues.get("secretkey")); - } else if (responseCode == 500) { - InputStream is = method.getResponseBodyAsStream(); - Map errorInfo = TestClientWithAPI.getSingleValueFromXML(is, new String[] {"errorcode", "description"}); - logger.error("registration failed with errorCode: " + errorInfo.get("errorCode") + " and description: " + errorInfo.get("description")); - } else { - logger.error("internal error processing request: " + method.getStatusText()); - } - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/longrun/VirtualMachine.java b/test/src-not-used/main/java/com/cloud/test/longrun/VirtualMachine.java deleted file mode 100644 index 61ca082273c..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/longrun/VirtualMachine.java +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.longrun; - -import java.io.IOException; -import java.io.InputStream; -import java.net.URLEncoder; -import java.util.Map; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import com.cloud.test.stress.TestClientWithAPI; - -public class VirtualMachine { - protected Logger logger = LogManager.getLogger(getClass()); - - private String privateIp; - private String userId; - - public VirtualMachine(String userId) { - this.userId = userId; - } - - public String getPrivateIp() { - return privateIp; - } - - public void setPrivateIp(String privateIp) { - this.privateIp = privateIp; - } - - public String getUserId() { - return userId; - } - - public void setUserId(String userId) { - this.userId = userId; - } - - public void deployVM(long zoneId, long serviceOfferingId, long templateId, String server, String apiKey, String secretKey) throws IOException { - - String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); - String encodedServiceOfferingId = URLEncoder.encode("" + serviceOfferingId, "UTF-8"); - String encodedTemplateId = URLEncoder.encode("" + templateId, "UTF-8"); - String encodedApiKey = URLEncoder.encode(apiKey, "UTF-8"); - String requestToSign = - "apiKey=" + encodedApiKey + "&command=deployVirtualMachine&serviceOfferingId=" + encodedServiceOfferingId + "&templateId=" + encodedTemplateId + "&zoneId=" + - encodedZoneId; - - requestToSign = requestToSign.toLowerCase(); - String signature = TestClientWithAPI.signRequest(requestToSign, secretKey); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - String url = - server + "?command=deployVirtualMachine" + "&zoneId=" + encodedZoneId + "&serviceOfferingId=" + encodedServiceOfferingId + "&templateId=" + - encodedTemplateId + "&apiKey=" + encodedApiKey + "&signature=" + 
encodedSignature; - - logger.info("Sending this request to deploy a VM: " + url); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - logger.info("deploy linux vm response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map values = TestClientWithAPI.getSingleValueFromXML(is, new String[] {"id", "ipaddress"}); - long linuxVMId = Long.parseLong(values.get("id")); - logger.info("got linux virtual machine id: " + linuxVMId); - this.setPrivateIp(values.get("ipaddress")); - - } else if (responseCode == 500) { - InputStream is = method.getResponseBodyAsStream(); - Map errorInfo = TestClientWithAPI.getSingleValueFromXML(is, new String[] {"errorcode", "description"}); - logger.error("deploy linux vm test failed with errorCode: " + errorInfo.get("errorCode") + " and description: " + errorInfo.get("description")); - } else { - logger.error("internal error processing request: " + method.getStatusText()); - } - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/ApiCommand.java b/test/src-not-used/main/java/com/cloud/test/regression/ApiCommand.java deleted file mode 100644 index 4b48be6c725..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/ApiCommand.java +++ /dev/null @@ -1,849 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.regression; - -import java.io.File; -import java.io.FileInputStream; -import java.io.InputStream; -import java.net.URLEncoder; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.Properties; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.cloud.test.utils.UtilsForTest; - -public class ApiCommand { - protected Logger logger = LogManager.getLogger(getClass()); - - public static enum CommandType { - HTTP, MYSQL, SCRIPT; - } - - public static enum ResponseType { - ERROR, EMPTY; - } - - private Element xmlCommand; - private String commandName; - private String testCaseInfo; - private boolean isUserCommand; - private boolean isAsync = false; - private CommandType commandType; - private ResponseType responseType; - - private TreeMap urlParam; - private HashMap verifyParam = new HashMap();; - private HashMap setParam = new HashMap();; - private int 
responseCode; - private Element responseBody; - - private String command; - private String host; - private boolean list; - private Element listName; - private Element listId; - private boolean required = false; - private ResultSet result; - - public ApiCommand(Element fstElmnt, HashMap param, HashMap commands) { - this.setXmlCommand(fstElmnt); - this.setCommandName(); - this.setResponseType(); - this.setUserCommand(); - this.setCommandType(); - this.setTestCaseInfo(); - this.setUrlParam(param); - this.setVerifyParam(param); - this.setHost("http://" + param.get("hostip")); - this.setCommand(param); - String async = commands.get(this.getName()); - if (async != null && async.equals("yes")) { - this.isAsync = true; - - } - } - - public Element getXmlCommand() { - return xmlCommand; - } - - public void setXmlCommand(Element xmlCommand) { - this.xmlCommand = xmlCommand; - } - - // ================FOLLOWING METHODS USE INPUT XML FILE=======================// - public void setCommandName() { - NodeList commandName = this.xmlCommand.getElementsByTagName("name"); - Element commandElmnt = (Element)commandName.item(0); - NodeList commandNm = commandElmnt.getChildNodes(); - this.commandName = (commandNm.item(0).getNodeValue()); - } - - public String getName() { - return commandName; - } - - public void setTestCaseInfo() { - this.testCaseInfo = getElementByName("testcase"); - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public void setResponseType() { - boolean result = verifyTagValue("error", "true"); - if (result) { - this.responseType = ResponseType.ERROR; - return; - } - result = verifyTagValue("empty", "true"); - if (result) { - this.responseType = ResponseType.EMPTY; - } - } - - public void setResponseType(ResponseType responseType) { - this.responseType = responseType; - } - - public ResponseType getResponseType() { - return responseType; - } - - public void setUserCommand() { - boolean result = 
verifyTagValue("usercommand", "true"); - this.isUserCommand = result; - } - - public void setCommandType() { - boolean result = verifyTagValue("mysql", "true"); - if (result) { - this.commandType = CommandType.MYSQL; - return; - } - result = verifyTagValue("script", "true"); - if (result) { - this.commandType = CommandType.SCRIPT; - return; - } - this.commandType = CommandType.HTTP; - } - - public CommandType getCommandType() { - return commandType; - } - - public String getTestCaseInfo() { - return testCaseInfo; - } - - public Boolean getRequired() { - return required; - } - - public void setUrlParam(HashMap param) { - this.urlParam = new TreeMap(); - NodeList parameterLst = this.xmlCommand.getElementsByTagName("parameters"); - if (parameterLst != null) { - for (int j = 0; j < parameterLst.getLength(); j++) { - Element parameterElement = (Element)parameterLst.item(j); - NodeList itemLst = parameterElement.getElementsByTagName("item"); - for (int k = 0; k < itemLst.getLength(); k++) { - Node item = itemLst.item(k); - if (item.getNodeType() == Node.ELEMENT_NODE) { - Element itemElement = (Element)item; - NodeList itemName = itemElement.getElementsByTagName("name"); - Element itemNameElement = (Element)itemName.item(0); - - // get value - Element itemValueElement = null; - if ((itemElement.getElementsByTagName("value") != null) && (itemElement.getElementsByTagName("value").getLength() != 0)) { - NodeList itemValue = itemElement.getElementsByTagName("value"); - itemValueElement = (Element)itemValue.item(0); - } - - Element itemParamElement = null; - // getparam - if ((itemElement.getElementsByTagName("param") != null) && (itemElement.getElementsByTagName("param").getLength() != 0)) { - NodeList itemParam = itemElement.getElementsByTagName("param"); - itemParamElement = (Element)itemParam.item(0); - } - - if ((itemElement.getAttribute("getparam").equals("true")) && (itemParamElement != null)) { - this.urlParam.put(itemNameElement.getTextContent(), 
param.get(itemParamElement.getTextContent())); - } else if (itemValueElement != null) { - this.urlParam.put(itemNameElement.getTextContent(), itemValueElement.getTextContent()); - } else if (itemElement.getAttribute("random").equals("true")) { - Random ran = new Random(); - String randomString = Math.abs(ran.nextInt()) + "-randomName"; - this.urlParam.put(itemNameElement.getTextContent(), randomString); - if ((itemElement.getAttribute("setparam").equals("true")) && (itemParamElement != null)) { - param.put(itemParamElement.getTextContent(), randomString); - } - } else if (itemElement.getAttribute("randomnumber").equals("true")) { - Random ran = new Random(); - Integer randomNumber = Math.abs(ran.nextInt(65535)); - this.urlParam.put(itemNameElement.getTextContent(), randomNumber.toString()); - if ((itemElement.getAttribute("setparam").equals("true")) && (itemParamElement != null)) { - param.put(itemParamElement.getTextContent(), randomNumber.toString()); - } - } - } - } - } - } - } - - // Set command URL - public void setCommand(HashMap param) { - - if (this.getCommandType() == CommandType.SCRIPT) { - String temp = "bash xen/" + this.commandName; - Set c = this.urlParam.entrySet(); - Iterator it = c.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - try { - temp = temp + " -" + key + " " + value; - } catch (Exception ex) { - logger.error("Unable to set parameter " + key + " for the command " + this.getName()); - } - } - this.command = temp; - } else if (this.getCommandType() == CommandType.MYSQL) { - String temp = this.commandName + " where "; - Set c = this.urlParam.entrySet(); - Iterator it = c.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - try { - temp = temp + key + "=" + value; - } catch (Exception ex) { - logger.error("Unable to set parameter " + key + " 
for the command " + this.getName()); - } - } - this.command = temp; - logger.info("The command is " + this.command); - - } else { - if ((param.get("apikey") == null) || (param.get("secretkey") == null) || (this.isUserCommand == false)) { - String temp = this.host + ":8096/?command=" + this.commandName; - Set c = this.urlParam.entrySet(); - Iterator it = c.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - try { - temp = temp + "&" + key + "=" + URLEncoder.encode(value, "UTF-8"); - } catch (Exception ex) { - logger.error("Unable to set parameter " + key + " for the command " + this.getName()); - } - } - this.command = temp; - } else if (isUserCommand == true) { - String apiKey = param.get("apikey"); - String secretKey = param.get("secretkey"); - - String temp = ""; - this.urlParam.put("apikey", apiKey); - this.urlParam.put("command", this.commandName); - - // sort url hash map by key - Set c = this.urlParam.entrySet(); - Iterator it = c.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - try { - temp = temp + key + "=" + URLEncoder.encode(value, "UTF-8") + "&"; - } catch (Exception ex) { - logger.error("Unable to set parameter " + value + " for the command " + this.getName()); - } - - } - temp = temp.substring(0, temp.length() - 1); - String requestToSign = temp.toLowerCase(); - String signature = UtilsForTest.signRequest(requestToSign, secretKey); - String encodedSignature = ""; - try { - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - } catch (Exception ex) { - logger.error(ex); - } - this.command = this.host + ":8080/client/api/?" 
+ temp + "&signature=" + encodedSignature; - } - } - } - - public void setVerifyParam(HashMap param) { - NodeList returnLst = this.xmlCommand.getElementsByTagName("returnvalue"); - if (returnLst != null) { - for (int m = 0; m < returnLst.getLength(); m++) { - Element returnElement = (Element)returnLst.item(m); - if (returnElement.getAttribute("list").equals("true")) { - this.list = true; - NodeList elementLst = returnElement.getElementsByTagName("element"); - this.listId = (Element)elementLst.item(0); - NodeList elementName = returnElement.getElementsByTagName("name"); - this.listName = (Element)elementName.item(0); - } else { - this.list = false; - } - - NodeList itemLst1 = returnElement.getElementsByTagName("item"); - if (itemLst1 != null) { - for (int n = 0; n < itemLst1.getLength(); n++) { - Node item = itemLst1.item(n); - if (item.getNodeType() == Node.ELEMENT_NODE) { - Element itemElement = (Element)item; - // get parameter name - NodeList itemName = itemElement.getElementsByTagName("name"); - Element itemNameElement = (Element)itemName.item(0); - - // Get parameters for future use - if (itemElement.getAttribute("setparam").equals("true")) { - NodeList itemVariable = itemElement.getElementsByTagName("param"); - Element itemVariableElement = (Element)itemVariable.item(0); - setParam.put(itemVariableElement.getTextContent(), itemNameElement.getTextContent()); - this.required = true; - } else if (itemElement.getAttribute("getparam").equals("true")) { - NodeList itemVariable = itemElement.getElementsByTagName("param"); - Element itemVariableElement = (Element)itemVariable.item(0); - this.verifyParam.put(itemNameElement.getTextContent(), param.get(itemVariableElement.getTextContent())); - } else if ((itemElement.getElementsByTagName("value") != null) && (itemElement.getElementsByTagName("value").getLength() != 0)) { - NodeList itemVariable = itemElement.getElementsByTagName("value"); - Element itemVariableElement = (Element)itemVariable.item(0); - 
this.verifyParam.put(itemNameElement.getTextContent(), itemVariableElement.getTextContent()); - } else { - this.verifyParam.put(itemNameElement.getTextContent(), "no value"); - } - } - } - } - } - } - } - - public int getResponseCode() { - return responseCode; - } - - // Send api command to the server - public void sendCommand(HttpClient client, Connection conn) { - if (TestCaseEngine.s_printUrl == true) { - logger.info("url is " + this.command); - } - - if (this.getCommandType() == CommandType.SCRIPT) { - try { - logger.info("Executing command " + this.command); - Runtime rtime = Runtime.getRuntime(); - Process child = rtime.exec(this.command); - Thread.sleep(10000); - int retCode = child.waitFor(); - if (retCode != 0) { - this.responseCode = retCode; - } else { - this.responseCode = 200; - } - - } catch (Exception ex) { - logger.error("Unable to execute a command " + this.command, ex); - } - } else if (this.getCommandType() == CommandType.MYSQL) { - try { - Statement stmt = conn.createStatement(); - this.result = stmt.executeQuery(this.command); - this.responseCode = 200; - } catch (Exception ex) { - this.responseCode = 400; - logger.error("Unable to execute mysql query " + this.command, ex); - } - } else { - HttpMethod method = new GetMethod(this.command); - try { - this.responseCode = client.executeMethod(method); - - if (this.responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(is); - doc.getDocumentElement().normalize(); - - if (!(this.isAsync)) { - this.responseBody = doc.getDocumentElement(); - } else { - // get async job result - Element jobTag = (Element)doc.getDocumentElement().getElementsByTagName("jobid").item(0); - String jobId = jobTag.getTextContent(); - Element responseBodyAsyncEl = queryAsyncJobResult(jobId); - if (responseBodyAsyncEl == null) { - 
logger.error("Can't get a async result"); - } else { - this.responseBody = responseBodyAsyncEl; - // get status of the job - Element jobStatusTag = (Element)responseBodyAsyncEl.getElementsByTagName("jobstatus").item(0); - String jobStatus = jobStatusTag.getTextContent(); - if (!jobStatus.equals("1")) { // Need to modify with different error codes for jobAsync -// results - // set fake response code by now - this.responseCode = 400; - } - } - } - } - - if (TestCaseEngine.s_printUrl == true) { - logger.info("Response code is " + this.responseCode); - } - } catch (Exception ex) { - logger.error("Command " + command + " failed with exception " + ex.getMessage()); - } finally { - method.releaseConnection(); - } - } - } - - // verify if response is empty (contains only root element) - public boolean isEmpty() { - boolean result = false; - if (!this.responseBody.hasChildNodes()) - result = true; - return result; - } - - // ================FOLLOWING METHODS USE RETURN XML FILE=======================// - - public boolean setParam(HashMap param) { - if ((this.responseBody == null) && (this.commandType == CommandType.HTTP)) { - logger.error("Response body is empty"); - return false; - } - Boolean result = true; - - if (this.getCommandType() == CommandType.MYSQL) { - Set set = this.setParam.entrySet(); - Iterator it = set.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - try { - String itemName = null; - while (this.result.next()) { - itemName = this.result.getString(value); - } - if (itemName != null) { - param.put(key, itemName); - } else { - logger.error("Following return parameter is missing: " + value); - result = false; - } - } catch (Exception ex) { - logger.error("Unable to set parameter " + value, ex); - } - } - } else if (this.getCommandType() == CommandType.HTTP) { - if (this.list == false) { - Set set = this.setParam.entrySet(); - Iterator it = set.iterator(); - 
- while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - // set parameters needed for the future use - NodeList itemName = this.responseBody.getElementsByTagName(value); - if ((itemName != null) && (itemName.getLength() != 0)) { - for (int i = 0; i < itemName.getLength(); i++) { - Element itemNameElement = (Element)itemName.item(i); - if (itemNameElement.getChildNodes().getLength() <= 1) { - param.put(key, itemNameElement.getTextContent()); - break; - } - } - } else { - logger.error("Following return parameter is missing: " + value); - result = false; - } - } - } else { - Set set = this.setParam.entrySet(); - Iterator it = set.iterator(); - NodeList returnLst = this.responseBody.getElementsByTagName(this.listName.getTextContent()); - Node requiredNode = returnLst.item(Integer.parseInt(this.listId.getTextContent())); - - if (requiredNode.getNodeType() == Node.ELEMENT_NODE) { - Element fstElmnt = (Element)requiredNode; - - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - NodeList itemName = fstElmnt.getElementsByTagName(value); - if ((itemName != null) && (itemName.getLength() != 0)) { - Element itemNameElement = (Element)itemName.item(0); - if (itemNameElement.getChildNodes().getLength() <= 1) { - param.put(key, itemNameElement.getTextContent()); - } - } else { - logger.error("Following return parameter is missing: " + value); - result = false; - } - } - } - } - } - return result; - } - - public String getUrl() { - return command; - } - - public boolean verifyParam() { - boolean result = true; - if (this.getCommandType() == CommandType.HTTP) { - if (this.list == false) { - Set set = verifyParam.entrySet(); - Iterator it = set.iterator(); - - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - if (value == 
null) { - logger.error("Parameter " + key + " is missing in the list of global parameters"); - return false; - } - - NodeList itemName = this.responseBody.getElementsByTagName(key); - if ((itemName.getLength() != 0) && (itemName != null)) { - Element itemNameElement = (Element)itemName.item(0); - if (itemNameElement.hasChildNodes()) { - continue; - } - if (!(verifyParam.get(key).equals("no value")) && !(itemNameElement.getTextContent().equals(verifyParam.get(key)))) { - logger.error("Incorrect value for the following tag: " + key + ". Expected value is " + verifyParam.get(key) + " while actual value is " + - itemNameElement.getTextContent()); - result = false; - } - } else { - logger.error("Following xml element is missing in the response: " + key); - result = false; - } - } - } - // for multiple elements - else { - Set set = verifyParam.entrySet(); - Iterator it = set.iterator(); - // get list element specified by id - NodeList returnLst = this.responseBody.getElementsByTagName(this.listName.getTextContent()); - Node requiredNode = returnLst.item(Integer.parseInt(this.listId.getTextContent())); - - if (requiredNode.getNodeType() == Node.ELEMENT_NODE) { - Element fstElmnt = (Element)requiredNode; - - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - if (value == null) { - logger.error("Parameter " + key + " is missing in the list of global parameters"); - return false; - } - NodeList itemName = fstElmnt.getElementsByTagName(key); - if ((itemName.getLength() != 0) && (itemName != null)) { - Element itemNameElement = (Element)itemName.item(0); - if (!(verifyParam.get(key).equals("no value")) && !(itemNameElement.getTextContent().equals(verifyParam.get(key)))) { - logger.error("Incorrect value for the following tag: " + key + ". 
Expected value is " + verifyParam.get(key) + - " while actual value is " + itemNameElement.getTextContent()); - result = false; - } - } else { - logger.error("Following xml element is missing in the response: " + key); - result = false; - } - } - } - } - } else if (this.getCommandType() == CommandType.MYSQL) { - Set set = verifyParam.entrySet(); - Iterator it = set.iterator(); - - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - if (value == null) { - logger.error("Parameter " + key + " is missing in the list of global parameters"); - return false; - } - - String itemName = null; - try { - while (this.result.next()) { - itemName = this.result.getString(key); - } - } catch (Exception ex) { - logger.error("Unable to get element from result set " + key); - } - - if (!(value.equals("no value")) && !(itemName.equals(verifyParam.get(key)))) { - logger.error("Incorrect value for the following tag: " + key + ". 
Expected value is " + verifyParam.get(key) + " while actual value is " + itemName); - result = false; - } - } - } - return result; - } - - public static boolean verifyEvents(String fileName, String level, String host, String account) { - boolean result = false; - HashMap expectedEvents = new HashMap(); - HashMap actualEvents = new HashMap(); - String key = ""; - - File file = new File(fileName); - if (file.exists()) { - Properties pro = new Properties(); - try { - // get expected events - FileInputStream in = new FileInputStream(file); - pro.load(in); - Enumeration en = pro.propertyNames(); - while (en.hasMoreElements()) { - key = (String)en.nextElement(); - expectedEvents.put(key, Integer.parseInt(pro.getProperty(key))); - } - - // get actual events - String url = host + "/?command=listEvents&account=" + account + "&level=" + level + "&domainid=1&pagesize=100"; - logger.info("Getting events with the following url " + url); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - ArrayList> eventValues = UtilsForTest.parseMulXML(is, new String[] {"event"}); - - for (int i = 0; i < eventValues.size(); i++) { - HashMap element = eventValues.get(i); - if (element.get("level").equals(level)) { - if (actualEvents.containsKey(element.get("type")) == true) { - actualEvents.put(element.get("type"), actualEvents.get(element.get("type")) + 1); - } else { - actualEvents.put(element.get("type"), 1); - } - } - } - } - method.releaseConnection(); - - // compare actual events with expected events - - // compare expected result and actual result - Iterator iterator = expectedEvents.keySet().iterator(); - Integer expected; - Integer actual; - int fail = 0; - while (iterator.hasNext()) { - expected = null; - actual = null; - String type = iterator.next().toString(); - expected = expectedEvents.get(type); - actual = 
actualEvents.get(type); - if (actual == null) { - logger.error("Event of type " + type + " and level " + level + " is missing in the listEvents response. Expected number of these events is " + - expected); - fail++; - } else if (expected.compareTo(actual) != 0) { - fail++; - logger.info("Amount of events of " + type + " type and level " + level + " is incorrect. Expected number of these events is " + expected + - ", actual number is " + actual); - } - } - if (fail == 0) { - result = true; - } - } catch (Exception ex) { - logger.error(ex); - } - } else { - logger.info("File " + fileName + " not found"); - } - return result; - } - - public static boolean verifyEvents(HashMap expectedEvents, String level, String host, String parameters) { - boolean result = false; - HashMap actualEvents = new HashMap(); - try { - // get actual events - String url = host + "/?command=listEvents&" + parameters; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - ArrayList> eventValues = UtilsForTest.parseMulXML(is, new String[] {"event"}); - - for (int i = 0; i < eventValues.size(); i++) { - HashMap element = eventValues.get(i); - if (element.get("level").equals(level)) { - if (actualEvents.containsKey(element.get("type")) == true) { - actualEvents.put(element.get("type"), actualEvents.get(element.get("type")) + 1); - } else { - actualEvents.put(element.get("type"), 1); - } - } - } - } - method.releaseConnection(); - } catch (Exception ex) { - logger.error(ex); - } - - // compare actual events with expected events - Iterator iterator = expectedEvents.keySet().iterator(); - Integer expected; - Integer actual; - int fail = 0; - while (iterator.hasNext()) { - expected = null; - actual = null; - String type = iterator.next().toString(); - expected = expectedEvents.get(type); - actual = actualEvents.get(type); - if (actual == 
null) { - logger.error("Event of type " + type + " and level " + level + " is missing in the listEvents response. Expected number of these events is " + expected); - fail++; - } else if (expected.compareTo(actual) != 0) { - fail++; - logger.info("Amount of events of " + type + " type and level " + level + " is incorrect. Expected number of these events is " + expected + - ", actual number is " + actual); - } - } - - if (fail == 0) { - result = true; - } - - return result; - } - - public Element queryAsyncJobResult(String jobId) { - Element returnBody = null; - int code = 400; - String resultUrl = this.host + ":8096/?command=queryAsyncJobResult&jobid=" + jobId; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(resultUrl); - while (true) { - try { - code = client.executeMethod(method); - if (code == 200) { - InputStream is = method.getResponseBodyAsStream(); - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(is); - doc.getDocumentElement().normalize(); - returnBody = doc.getDocumentElement(); - Element jobStatusTag = (Element)returnBody.getElementsByTagName("jobstatus").item(0); - String jobStatus = jobStatusTag.getTextContent(); - if (jobStatus.equals("0")) { - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - logger.debug("[ignored] interrupted while during async job result query."); - } - } else { - break; - } - method.releaseConnection(); - } else { - logger.error("Error during queryJobAsync. 
Error code is " + code); - this.responseCode = code; - return null; - } - } catch (Exception ex) { - logger.error(ex); - } - } - return returnBody; - } - - private String getElementByName(String elementName) { - NodeList commandName = this.xmlCommand.getElementsByTagName(elementName); - if (commandName.getLength() != 0) { - Element commandElmnt = (Element)commandName.item(0); - NodeList commandNm = commandElmnt.getChildNodes(); - return commandNm.item(0).getNodeValue(); - } else { - return null; - } - } - - private boolean verifyTagValue(String elementName, String expectedValue) { - NodeList tag = this.xmlCommand.getElementsByTagName(elementName); - if (tag.getLength() != 0) { - Element commandElmnt = (Element)tag.item(0); - NodeList commandNm = commandElmnt.getChildNodes(); - if (commandNm.item(0).getNodeValue().equals(expectedValue)) { - return true; - } else { - return false; - } - } else { - return false; - } - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/ConfigTest.java b/test/src-not-used/main/java/com/cloud/test/regression/ConfigTest.java deleted file mode 100644 index 661a4289932..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/ConfigTest.java +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.regression; - -import java.util.HashMap; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.trilead.ssh2.Connection; -import com.trilead.ssh2.Session; - -import com.cloud.test.regression.ApiCommand.ResponseType; - -public class ConfigTest extends TestCase { - - public ConfigTest() { - this.setClient(); - this.setParam(new HashMap()); - } - - @Override - public boolean executeTest() { - - int error = 0; - Element rootElement = this.getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - if (api.getName().equals("rebootManagementServer")) { - - logger.info("Attempting to SSH into management server " + this.getParam().get("hostip")); - try { - Connection conn = new Connection(this.getParam().get("hostip")); - conn.connect(null, 60000, 60000); - - logger.info("SSHed successfully into management server " + this.getParam().get("hostip")); - - boolean isAuthenticated = conn.authenticateWithPassword("root", "password"); - - if (isAuthenticated == false) { - logger.info("Authentication failed for root with password"); - return false; - } - - String restartCommand = "service cloud-management restart; service cloud-usage restart"; - Session sess = conn.openSession(); - logger.info("Executing : " + restartCommand); - sess.execCommand(restartCommand); - Thread.sleep(120000); - sess.close(); - conn.close(); - - } catch (Exception ex) { - logger.error(ex); - return false; - } - } else { - //send a command - 
api.sendCommand(this.getClient(), null); - - //verify the response of the command - if ((api.getResponseType() == ResponseType.ERROR) && (api.getResponseCode() == 200) && (api.getTestCaseInfo() != null)) { - logger.error("Test case " + api.getTestCaseInfo() + - "failed. Command that was supposed to fail, passed. The command was sent with the following url " + api.getUrl()); - error++; - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() == 200)) { - //set parameters for the future use - if (api.setParam(this.getParam()) == false) { - logger.error("Exiting the test...Command " + api.getName() + - " didn't return parameters needed for the future use. The command was sent with url " + api.getUrl()); - return false; - } else { - //verify parameters - if (api.verifyParam() == false) { - logger.error("Command " + api.getName() + " failed. Verification for returned parameters failed. Command was sent with url " + api.getUrl()); - error++; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed. Command was sent with the url " + api.getUrl()); - } - } - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() != 200)) { - logger.error("Command " + api.getName() + " failed with an error code " + api.getResponseCode() + " . Command was sent with url " + api.getUrl() + - " Required: " + api.getRequired()); - if (api.getRequired() == true) { - logger.info("The command is required for the future use, so exiging"); - return false; - } - error++; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed. Command that was supposed to fail, failed - test passed. 
Command was sent with url " + - api.getUrl()); - } - } - } - if (error != 0) - return false; - else - return true; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/DelegatedAdminTest.java b/test/src-not-used/main/java/com/cloud/test/regression/DelegatedAdminTest.java deleted file mode 100644 index 65c3c1e5378..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/DelegatedAdminTest.java +++ /dev/null @@ -1,127 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.regression; - -import java.util.HashMap; - -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.cloud.test.regression.ApiCommand.ResponseType; - -public class DelegatedAdminTest extends TestCase { - - - public DelegatedAdminTest() { - this.setClient(); - this.setParam(new HashMap()); - } - - @Override - public boolean executeTest() { - int error = 0; - - for (Document eachElement : this.getInputFile()) { - - Element rootElement = eachElement.getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - boolean verify = false; - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - if ((eachElement.getElementsByTagName("delegated_admin_verify_part2").getLength() != 0) && !(api.getName().equals("registerUserKeys"))) { - if (api.getName().startsWith("list")) { - - if (this.denyToExecute()) { - api.setResponseType(ResponseType.EMPTY); - } - verify = true; - } - - if (this.denyToExecute()) { - api.setResponseType(ResponseType.ERROR); - } - } - - //send a command - api.sendCommand(this.getClient(), null); - - //verify the response of the command - if ((verify == true) && !(api.getResponseType() == ResponseType.ERROR || api.getResponseType() == ResponseType.EMPTY)) { - logger.error("Test case " + api.getTestCaseInfo() + - " failed. Command that was supposed to fail, passed. 
The command was sent with the following url " + api.getUrl()); - error++; - } else if ((verify == true) && (api.getResponseType() == ResponseType.ERROR || api.getResponseType() == ResponseType.EMPTY)) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - } else if ((api.getResponseType() == ResponseType.ERROR) && (api.getResponseCode() == 200)) { - logger.error("Test case " + api.getTestCaseInfo() + - " failed. Command that was supposed to fail, passed. The command was sent with the following url " + api.getUrl()); - error++; - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() == 200)) { - //set parameters for the future use - if (api.setParam(this.getParam()) == false) { - logger.error("Exiting the test...Command " + api.getName() + - " didn't return parameters needed for the future use. The command was sent with url " + api.getUrl()); - return false; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - } - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() != 200)) { - logger.error("Test case " + api.getTestCaseInfo() + " failed with an error code " + api.getResponseCode() + " . 
Command was sent with url " + - api.getUrl()); - if (api.getRequired() == true) { - logger.info("The command is required for the future use, so exiging"); - return false; - } - error++; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - - } - } - } - - if (error != 0) - return false; - else - return true; - } - - public boolean denyToExecute() { - boolean result = true; - Integer level1 = Integer.valueOf(this.getParam().get("domainlevel1")); - Integer level2 = Integer.valueOf(this.getParam().get("domainlevel2")); - String domain1 = this.getParam().get("domainname1"); - String domain2 = this.getParam().get("domainname2"); - - if (this.getParam().get("accounttype2").equals("1")) { - result = false; - } else if ((level2.compareTo(level1) < 0) && (domain1.startsWith(domain2)) && (this.getParam().get("accounttype2").equals("2"))) { - result = false; - } - - return result; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/Deploy.java b/test/src-not-used/main/java/com/cloud/test/regression/Deploy.java deleted file mode 100644 index ab62841270b..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/Deploy.java +++ /dev/null @@ -1,107 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.regression; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -public class Deploy extends TestCase { - - public Deploy() { - this.setClient(); - this.setParam(new HashMap()); - } - - @Override - public boolean executeTest() { - int error = 0; - Element rootElement = this.getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - //send a command - api.sendCommand(this.getClient(), null); - - //verify the response of the command - if (api.getResponseCode() != 200) { - error++; - logger.error("The command " + api.getUrl() + " failed"); - } else { - logger.info("The command " + api.getUrl() + " passsed"); - } - } - if (error != 0) - return false; - else - return true; - } - - public static void main(String[] args) { - - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - String host = null; - String file = null; - - while (iter.hasNext()) { - String arg = iter.next(); - // management server host - if (arg.equals("-h")) { - host = iter.next(); - } - if (arg.equals("-f")) { - file = iter.next(); - } - } - - Deploy deploy = new Deploy(); - - ArrayList inputFile = new ArrayList(); - inputFile.add(file); - deploy.setInputFile(inputFile); - deploy.setTestCaseName("Management server deployment"); - deploy.getParam().put("hostip", host); - deploy.getParam().put("apicommands", 
"../metadata/func/commands"); - deploy.setCommands(); - - logger.info("Starting deployment against host " + host); - - boolean result = deploy.executeTest(); - if (result == false) { - logger.error("DEPLOYMENT FAILED"); - System.exit(1); - } else { - logger.info("DEPLOYMENT IS SUCCESSFUL"); - } - - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/EventsApiTest.java b/test/src-not-used/main/java/com/cloud/test/regression/EventsApiTest.java deleted file mode 100644 index e05d6a95f1b..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/EventsApiTest.java +++ /dev/null @@ -1,174 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.regression; - -import java.sql.Statement; -import java.util.HashMap; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.trilead.ssh2.Connection; -import com.trilead.ssh2.Session; - -import com.cloud.test.regression.ApiCommand.ResponseType; - -public class EventsApiTest extends TestCase { - - public EventsApiTest() { - this.setClient(); - this.setParam(new HashMap()); - } - - @Override - public boolean executeTest() { - int error = 0; - Element rootElement = this.getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //!!!check if we need to execute mySql command - NodeList commandName = fstElmnt.getElementsByTagName("name"); - Element commandElmnt = (Element)commandName.item(0); - NodeList commandNm = commandElmnt.getChildNodes(); - if (commandNm.item(0).getNodeValue().equals("mysqlupdate")) { - //establish connection to mysql server and execute an update command - NodeList mysqlList = fstElmnt.getElementsByTagName("mysqlcommand"); - for (int j = 0; j < mysqlList.getLength(); j++) { - Element itemVariableElement = (Element)mysqlList.item(j); - - logger.info("Executing mysql command " + itemVariableElement.getTextContent()); - try { - Statement st = this.getConn().createStatement(); - st.executeUpdate(itemVariableElement.getTextContent()); - } catch (Exception ex) { - logger.error(ex); - return false; - } - } - } - - else if (commandNm.item(0).getNodeValue().equals("agentcommand")) { - //connect to all the agents and execute agent command - NodeList commandList = fstElmnt.getElementsByTagName("commandname"); - Element commandElement = (Element)commandList.item(0); - NodeList ipList = fstElmnt.getElementsByTagName("ip"); - for 
(int j = 0; j < ipList.getLength(); j++) { - Element itemVariableElement = (Element)ipList.item(j); - - logger.info("Attempting to SSH into agent " + itemVariableElement.getTextContent()); - try { - Connection conn = new Connection(itemVariableElement.getTextContent()); - conn.connect(null, 60000, 60000); - - logger.info("SSHed successfully into agent " + itemVariableElement.getTextContent()); - - boolean isAuthenticated = conn.authenticateWithPassword("root", "password"); - - if (isAuthenticated == false) { - logger.info("Authentication failed for root with password"); - return false; - } - - Session sess = conn.openSession(); - logger.info("Executing : " + commandElement.getTextContent()); - sess.execCommand(commandElement.getTextContent()); - Thread.sleep(60000); - sess.close(); - conn.close(); - - } catch (Exception ex) { - logger.error(ex); - return false; - } - } - } - - else { - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - //send a command - api.sendCommand(this.getClient(), null); - - //verify the response of the command - if ((api.getResponseType() == ResponseType.ERROR) && (api.getResponseCode() == 200)) { - logger.error("Test case " + api.getTestCaseInfo() + - " failed. Command that was supposed to fail, passed. The command was sent with the following url " + api.getUrl()); - error++; - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() == 200)) { - //verify if response is suppposed to be empty - if (api.getResponseType() == ResponseType.EMPTY) { - if (api.isEmpty() == true) { - logger.info("Test case " + api.getTestCaseInfo() + " passed. Empty response was returned as expected. Command was sent with url " + - api.getUrl()); - } else { - logger.error("Test case " + api.getTestCaseInfo() + " failed. Empty response was expected. 
Command was sent with url " + api.getUrl()); - } - } else { - if (api.isEmpty() != false) - logger.error("Test case " + api.getTestCaseInfo() + " failed. Non-empty response was expected. Command was sent with url " + api.getUrl()); - else { - //set parameters for the future use - if (api.setParam(this.getParam()) == false) { - logger.error("Exiting the test...Command " + api.getName() + - " didn't return parameters needed for the future use. The command was sent with url " + api.getUrl()); - return false; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed. Command was sent with the url " + api.getUrl()); - } - } - } - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() != 200)) { - logger.error("Command " + api.getName() + " failed with an error code " + api.getResponseCode() + " . Command was sent with url " + api.getUrl()); - if (api.getRequired() == true) { - logger.info("The command is required for the future use, so exiging"); - return false; - } - error++; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed. Command that was supposed to fail, failed. 
Command was sent with url " + api.getUrl()); - - } - } - } - - //verify events with userid parameter - test case 97 - HashMap expectedEvents = new HashMap(); - expectedEvents.put("VM.START", 1); - boolean eventResult = - ApiCommand.verifyEvents(expectedEvents, "INFO", "http://" + this.getParam().get("hostip") + ":8096", "userid=" + this.getParam().get("userid1") + - "&type=VM.START"); - logger.info("Test case 97 - listEvent command verification result is " + eventResult); - - //verify error events - eventResult = - ApiCommand.verifyEvents("../metadata/error_events.properties", "ERROR", "http://" + this.getParam().get("hostip") + ":8096", - this.getParam().get("erroruseraccount")); - logger.info("listEvent command verification result is " + eventResult); - - if (error != 0) - return false; - else - return true; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/HA.java b/test/src-not-used/main/java/com/cloud/test/regression/HA.java deleted file mode 100644 index b70d050011e..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/HA.java +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.regression; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.cloud.test.regression.ApiCommand.ResponseType; - -public class HA extends TestCase { - - - public HA() { - this.setClient(); - } - - @Override - public boolean executeTest() { - int error = 0; - Element rootElement = this.getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - //send a command - api.sendCommand(this.getClient(), this.getConn()); - - //verify the response parameters - if ((api.getResponseCode() != 200) && (api.getRequired() == true)) { - logger.error("Exiting the test....Command " + api.getName() + " required for the future run, failed with an error code " + api.getResponseCode() + - ". Command was sent with the url " + api.getUrl()); - return false; - } else if ((api.getResponseCode() != 200) && (api.getResponseType() != ResponseType.ERROR)) { - error++; - logger.error("Command " + api.getTestCaseInfo() + " failed with an error code " + api.getResponseCode() + " . Command was sent with url " + - api.getUrl()); - } else if ((api.getResponseCode() == 200) && (api.getResponseType() == ResponseType.ERROR)) { - error++; - logger.error("Command " + api.getTestCaseInfo() + " which was supposed to failed, passed. The command was sent with url " + api.getUrl()); - } else { - //set parameters for the future use - if (api.setParam(this.getParam()) == false) { - logger.error("Exiting the test...Command " + api.getName() + " didn't return parameters needed for the future use. 
Command was sent with url " + - api.getUrl()); - return false; - } - logger.info("Command " + api.getTestCaseInfo() + " passed"); - } - } - - if (error != 0) - return false; - else - return true; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/LoadBalancingTest.java b/test/src-not-used/main/java/com/cloud/test/regression/LoadBalancingTest.java deleted file mode 100644 index 52c459622c4..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/LoadBalancingTest.java +++ /dev/null @@ -1,140 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.regression; - -import java.util.HashMap; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.cloud.test.regression.ApiCommand.ResponseType; - -public class LoadBalancingTest extends TestCase { - - - public LoadBalancingTest() { - this.setClient(); - this.setParam(new HashMap()); - } - - @Override - public boolean executeTest() { - - int error = 0; - Element rootElement = this.getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - //send a command - api.sendCommand(this.getClient(), null); - - //verify the response of the command - if ((api.getResponseType() == ResponseType.ERROR) && (api.getResponseCode() == 200)) { - logger.error("Test case " + api.getTestCaseInfo() + " failed. Command that was supposed to fail, passed. The command was sent with the following url " + - api.getUrl()); - error++; - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() == 200)) { - //verify if response is suppposed to be empty - if (api.getResponseType() == ResponseType.EMPTY) { - if (api.isEmpty() == true) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - } else { - logger.error("Test case " + api.getTestCaseInfo() + " failed. Empty response was expected. Command was sent with url " + api.getUrl()); - } - } else { - if (api.isEmpty() != false) - logger.error("Test case " + api.getTestCaseInfo() + " failed. Non-empty response was expected. 
Command was sent with url " + api.getUrl()); - else { - //set parameters for the future use - if (api.setParam(this.getParam()) == false) { - logger.error("Exiting the test...Command " + api.getName() + - " didn't return parameters needed for the future use. The command was sent with url " + api.getUrl()); - return false; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - } - } - } - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() != 200)) { - logger.error("Test case " + api.getTestCaseInfo() + " failed. Command was sent with url " + api.getUrl()); - if (api.getRequired() == true) { - logger.info("The command is required for the future use, so exiging"); - return false; - } - error++; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - - } - } - -// //Try to create portForwarding rule for all available private/public ports -// ArrayList port = new ArrayList(); -// for (int i=1; i<65536; i++){ -// port.add(Integer.toString(i)); -// } -// -// //try all public ports -// for (String portValue : port) { -// try { -// String url = this.getHost() + ":8096/?command=createOrUpdateLoadBalancerRule&account=" + this.getParam().get("accountname") + "&publicip=" + this.getParam().get("boundaryip") + -// "&privateip=" + this.getParam().get("vmipaddress") + "&privateport=22&protocol=tcp&publicport=" + portValue; -// HttpClient client = new HttpClient(); -// HttpMethod method = new GetMethod(url); -// int responseCode = client.executeMethod(method); -// if (responseCode != 200 ) { -// error++; -// logger.error("Can't create LB rule for the public port " + portValue + ". 
Request was sent with url " + url); -// } -// }catch (Exception ex) { -// logger.error(ex); -// } -// } -// -// //try all private ports -// for (String portValue : port) { -// try { -// String url = this.getHost() + ":8096/?command=createOrUpdateLoadBalancerRule&account=" + this.getParam().get("accountname") + "&publicip=" + this.getParam().get("boundaryip") + -// "&privateip=" + this.getParam().get("vmipaddress") + "&publicport=22&protocol=tcp&privateport=" + portValue; -// HttpClient client = new HttpClient(); -// HttpMethod method = new GetMethod(url); -// int responseCode = client.executeMethod(method); -// if (responseCode != 200 ) { -// error++; -// logger.error("Can't create LB rule for the private port " + portValue + ". Request was sent with url " + url); -// } -// }catch (Exception ex) { -// logger.error(ex); -// } -// } - - if (error != 0) - return false; - else - return true; - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/PortForwardingTest.java b/test/src-not-used/main/java/com/cloud/test/regression/PortForwardingTest.java deleted file mode 100644 index 40215c0ecb3..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/PortForwardingTest.java +++ /dev/null @@ -1,141 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.regression; - -import java.util.HashMap; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.cloud.test.regression.ApiCommand.ResponseType; - -public class PortForwardingTest extends TestCase { - - public PortForwardingTest() { - setClient(); - setParam(new HashMap()); - } - - @Override - public boolean executeTest() { - - int error = 0; - Element rootElement = getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, getParam(), getCommands()); - - //send a command - api.sendCommand(getClient(), null); - - //verify the response of the command - if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() == 200)) { - logger.error("Test case " + api.getTestCaseInfo() + " failed. Command that was supposed to fail, passed. The command was sent with the following url " + - api.getUrl()); - error++; - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() == 200)) { - //verify if response is suppposed to be empty - if (api.getResponseType() == ResponseType.EMPTY) { - if (api.isEmpty() == true) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - } else { - logger.error("Test case " + api.getTestCaseInfo() + " failed. Empty response was expected. Command was sent with url " + api.getUrl()); - } - } else { - if (api.isEmpty() != false) - logger.error("Test case " + api.getTestCaseInfo() + " failed. Non-empty response was expected. 
Command was sent with url " + api.getUrl()); - else { - //set parameters for the future use - if (api.setParam(getParam()) == false) { - logger.error("Exiting the test...Command " + api.getName() + - " didn't return parameters needed for the future use. The command was sent with url " + api.getUrl()); - return false; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - } - } - } - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() != 200)) { - logger.error("Test case " + api.getTestCaseInfo() + " failed . Command was sent with url " + api.getUrl()); - if (api.getRequired() == true) { - logger.info("The command is required for the future use, so exiging"); - return false; - } - error++; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - - } - } - -// //Try to create portForwarding rule for all available private/public ports -// ArrayList port = new ArrayList(); -// for (int i=1; i<65536; i++){ -// port.add(Integer.toString(i)); -// } -// -// //try all public ports -// for (String portValue : port) { -// try { -// logger.info("public port is " + portValue); -// String url = this.getHost() + ":8096/?command=createOrUpdateIpForwardingRule&account=" + this.getParam().get("accountname") + "&publicip=" + this.getParam().get("boundaryip") + -// "&privateip=" + this.getParam().get("vmipaddress") + "&privateport=22&protocol=tcp&publicport=" + portValue; -// HttpClient client = new HttpClient(); -// HttpMethod method = new GetMethod(url); -// int responseCode = client.executeMethod(method); -// if (responseCode != 200 ) { -// error++; -// logger.error("Can't create portForwarding rule for the public port " + portValue + ". 
Request was sent with url " + url); -// } -// }catch (Exception ex) { -// logger.error(ex); -// } -// } -// -// //try all private ports -// for (String portValue : port) { -// try { -// String url = this.getHost() + ":8096/?command=createOrUpdateIpForwardingRule&account=" + -// this.getParam().get("accountname") + "&publicip=" + this.getParam().get("boundaryip") + -// "&privateip=" + this.getParam().get("vmipaddress") + "&publicport=22&protocol=tcp&privateport=" + portValue; -// HttpClient client = new HttpClient(); -// HttpMethod method = new GetMethod(url); -// int responseCode = client.executeMethod(method); -// if (responseCode != 200 ) { -// error++; -// logger.error("Can't create portForwarding rule for the private port " + portValue + ". Request was sent with url " + url); -// } -// }catch (Exception ex) { -// logger.error(ex); -// } -// } - - if (error != 0) - return false; - else - return true; - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/SanityTest.java b/test/src-not-used/main/java/com/cloud/test/regression/SanityTest.java deleted file mode 100644 index eeeaf20b6d9..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/SanityTest.java +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.regression; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -public class SanityTest extends TestCase { - - - public SanityTest() { - this.setClient(); - } - - @Override - public boolean executeTest() { - int error = 0; - Element rootElement = this.getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - api.sendCommand(this.getClient(), null); - - //verify the response parameters - if ((api.getResponseCode() != 200) && (api.getRequired() == true)) { - logger.error("Exiting the test....Command " + api.getName() + " required for the future run, failed with an error code " + api.getResponseCode() + - ". Command was sent with the url " + api.getUrl()); - return false; - } else if (api.getResponseCode() != 200) { - error++; - logger.error("Test " + api.getTestCaseInfo() + " failed with an error code " + api.getResponseCode() + " . Command was sent with url " + api.getUrl()); - } else { - //set parameters for the future use - if (api.setParam(this.getParam()) == false) { - logger.error("Exiting the test...Command " + api.getName() + " didn't return parameters needed for the future use. Command was sent with url " + - api.getUrl()); - return false; - } - - //verify parameters - if (api.verifyParam() == false) { - logger.error("Test " + api.getTestCaseInfo() + " failed. Verification for returned parameters failed. 
The command was sent with url " + - api.getUrl()); - error++; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test " + api.getTestCaseInfo() + " passed"); - } - } - } - - //verify event - boolean eventResult = - ApiCommand.verifyEvents("../metadata/func/regression_events.properties", "INFO", "http://" + this.getParam().get("hostip") + ":8096", - this.getParam().get("accountname")); - logger.info("listEvent command verification result is " + eventResult); - - if (error != 0) - return false; - else - return true; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/Test.java b/test/src-not-used/main/java/com/cloud/test/regression/Test.java deleted file mode 100644 index 32057f6efaf..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/Test.java +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.regression; - -import java.util.ArrayList; -import java.util.HashMap; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -public class Test extends TestCase { - - public Test() { - this.setClient(); - this.setParam(new HashMap()); - } - - @Override - public boolean executeTest() { - - int error = 0; - Element rootElement = this.getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - //send a command - api.sendCommand(this.getClient(), null); - - } - - //Try to create portForwarding rule for all available private/public ports - ArrayList port = new ArrayList(); - for (int j = 1; j < 1000; j++) { - port.add(Integer.toString(j)); - } - - //try all public ports - for (String portValue : port) { - try { - logger.info("public port is " + portValue); - String url = - "http://" + this.getParam().get("hostip") + ":8096/?command=createNetworkRule&publicPort=" + portValue + - "&privatePort=22&protocol=tcp&isForward=true&securityGroupId=1&account=admin"; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - if (responseCode != 200) { - error++; - logger.error("Can't create portForwarding network rule for the public port " + portValue + ". 
Request was sent with url " + url); - } - } catch (Exception ex) { - logger.error(ex); - } - } - - if (error != 0) - return false; - else - return true; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/TestCase.java b/test/src-not-used/main/java/com/cloud/test/regression/TestCase.java deleted file mode 100644 index 26b534e976c..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/TestCase.java +++ /dev/null @@ -1,139 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.regression; - -import java.io.File; -import java.io.FileInputStream; -import java.sql.Connection; -import java.sql.DriverManager; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Properties; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.w3c.dom.Document; - -public abstract class TestCase { - - public static Logger LOGGER = LogManager.getLogger(TestCase.class.getName()); - private Connection conn; - private ArrayList inputFile = new ArrayList(); - private HttpClient client; - private String testCaseName; - private HashMap param = new HashMap(); - private HashMap commands = new HashMap(); - - public HashMap getParam() { - return param; - } - - public void setParam(HashMap param) { - this.param = param; - } - - public HashMap getCommands() { - return commands; - } - - public void setCommands() { - File asyncCommands = null; - if (param.get("apicommands") == null) { - LOGGER.info("Unable to get the list of commands, exiting"); - System.exit(1); - } else { - asyncCommands = new File(param.get("apicommands")); - } - try { - Properties pro = new Properties(); - FileInputStream in = new FileInputStream(asyncCommands); - pro.load(in); - Enumeration en = pro.propertyNames(); - while (en.hasMoreElements()) { - String key = (String)en.nextElement(); - commands.put(key, pro.getProperty(key)); - } - } catch (Exception ex) { - LOGGER.info("Unable to find the file " + param.get("apicommands") + " due to following exception " + ex); - } - - } - - public Connection getConn() { - return conn; - } - - public void setConn(String dbPassword) { - this.conn = null; - try { - Class.forName("com.mysql.jdbc.Driver"); - this.conn = DriverManager.getConnection("jdbc:mysql://" + param.get("db") + "/cloud?" 
+ TransactionLegacy.CONNECTION_PARAMS, "root", dbPassword); - if (!this.conn.isValid(0)) { - LOGGER.error("Connection to DB failed to establish"); - } - - } catch (Exception ex) { - LOGGER.error(ex); - } - } - - public void setInputFile(ArrayList fileNameInput) { - for (String fileName : fileNameInput) { - File file = new File(fileName); - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - Document doc = null; - try { - DocumentBuilder builder = factory.newDocumentBuilder(); - doc = builder.parse(file); - doc.getDocumentElement().normalize(); - } catch (Exception ex) { - LOGGER.error("Unable to load " + fileName + " due to ", ex); - } - this.inputFile.add(doc); - } - } - - public ArrayList getInputFile() { - return inputFile; - } - - public void setTestCaseName(String testCaseName) { - this.testCaseName = testCaseName; - } - - public String getTestCaseName() { - return this.testCaseName; - } - - public void setClient() { - HttpClient client = new HttpClient(); - this.client = client; - } - - public HttpClient getClient() { - return this.client; - } - - //abstract methods - public abstract boolean executeTest(); - -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/TestCaseEngine.java b/test/src-not-used/main/java/com/cloud/test/regression/TestCaseEngine.java deleted file mode 100644 index 18f35e9ce6f..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/TestCaseEngine.java +++ /dev/null @@ -1,276 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.regression; - -import java.io.File; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -public class TestCaseEngine { - - protected Logger logger = LogManager.getLogger(getClass()); - public static String s_fileName = "../metadata/adapter.xml"; - public static HashMap s_globalParameters = new HashMap(); - protected static HashMap s_componentMap = new HashMap(); - protected static HashMap> s_inputFile = new HashMap>(); - protected static String s_testCaseName = new String(); - protected static ArrayList s_keys = new ArrayList(); - private static ThreadLocal s_result = new ThreadLocal(); - public static int s_numThreads = 1; - public static boolean s_repeat = false; - public static boolean s_printUrl = false; - public static String s_type = "All"; - public static boolean s_isSanity = false; - public static boolean s_isRegression = false; - private static int s_failure = 0; - - public static void main(String args[]) { - - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - // is stress? 
- if (arg.equals("-t")) { - s_numThreads = Integer.parseInt(iter.next()); - } - // do you want to print url for all commands? - if (arg.equals("-p")) { - s_printUrl = true; - } - - //type of the test: sanity, regression, all (default) - if (arg.equals("-type")) { - s_type = iter.next(); - } - - if (arg.equals("-repeat")) { - s_repeat = Boolean.valueOf(iter.next()); - } - - if (arg.equals("-filename")) { - s_fileName = iter.next(); - } - } - - if (s_type.equalsIgnoreCase("sanity")) - s_isSanity = true; - else if (s_type.equalsIgnoreCase("regression")) - s_isRegression = true; - - try { - // parse adapter.xml file to get list of tests to execute - File file = new File(s_fileName); - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(file); - doc.getDocumentElement().normalize(); - Element root = doc.getDocumentElement(); - - // set global parameters - setGlobalParams(root); - - // populate _componentMap - setComponent(root); - - // set error to 0 by default - - // execute test - for (int i = 0; i < s_numThreads; i++) { - if (s_numThreads > 1) { - logger.info("STARTING STRESS TEST IN " + s_numThreads + " THREADS"); - } else { - logger.info("STARTING FUNCTIONAL TEST"); - } - new Thread(new Runnable() { - @Override - public void run() { - do { - if (s_numThreads == 1) { - try { - for (String key : s_keys) { - Class c = Class.forName(s_componentMap.get(key)); - TestCase component = (TestCase)c.newInstance(); - executeTest(key, c, component); - } - } catch (Exception ex1) { - logger.error(ex1); - } finally { - if (s_failure > 0) { - System.exit(1); - } - } - } else { - Random ran = new Random(); - Integer randomNumber = Math.abs(ran.nextInt(s_keys.size())); - try { - String key = s_keys.get(randomNumber); - Class c = Class.forName(s_componentMap.get(key)); - TestCase component = (TestCase)c.newInstance(); - executeTest(key, c, component); - } catch (Exception e) { 
- logger.error("Error in thread ", e); - } - } - } while (s_repeat); - } - }).start(); - } - - } catch (Exception exc) { - logger.error(exc); - } - } - - public static void setGlobalParams(Element rootElement) { - NodeList globalParam = rootElement.getElementsByTagName("globalparam"); - Element parameter = (Element)globalParam.item(0); - NodeList paramLst = parameter.getElementsByTagName("param"); - - for (int i = 0; i < paramLst.getLength(); i++) { - Element paramElement = (Element)paramLst.item(i); - - if (paramElement.getNodeType() == Node.ELEMENT_NODE) { - Element itemElement = paramElement; - NodeList itemName = itemElement.getElementsByTagName("name"); - Element itemNameElement = (Element)itemName.item(0); - NodeList itemVariable = itemElement.getElementsByTagName("variable"); - Element itemVariableElement = (Element)itemVariable.item(0); - s_globalParameters.put(itemVariableElement.getTextContent(), itemNameElement.getTextContent()); - } - } - } - - public static void setComponent(Element rootElement) { - NodeList testLst = rootElement.getElementsByTagName("test"); - for (int j = 0; j < testLst.getLength(); j++) { - Element testElement = (Element)testLst.item(j); - - if (testElement.getNodeType() == Node.ELEMENT_NODE) { - Element itemElement = testElement; - - // get test case name - NodeList testCaseNameList = itemElement.getElementsByTagName("testname"); - if (testCaseNameList != null) { - s_testCaseName = ((Element)testCaseNameList.item(0)).getTextContent(); - } - - if (s_isSanity == true && !s_testCaseName.equals("SANITY TEST")) - continue; - else if (s_isRegression == true && !(s_testCaseName.equals("SANITY TEST") || s_testCaseName.equals("REGRESSION TEST"))) - continue; - - // set class name - NodeList className = itemElement.getElementsByTagName("class"); - if ((className.getLength() == 0) || (className == null)) { - s_componentMap.put(s_testCaseName, "com.cloud.test.regression.VMApiTest"); - } else { - String name = 
((Element)className.item(0)).getTextContent(); - s_componentMap.put(s_testCaseName, name); - } - - // set input file name - NodeList inputFileNameLst = itemElement.getElementsByTagName("filename"); - s_inputFile.put(s_testCaseName, new ArrayList()); - for (int k = 0; k < inputFileNameLst.getLength(); k++) { - String inputFileName = ((Element)inputFileNameLst.item(k)).getTextContent(); - s_inputFile.get(s_testCaseName).add(inputFileName); - } - } - } - - //If sanity test required, make sure that SANITY TEST componennt got loaded - if (s_isSanity == true && s_componentMap.size() == 0) { - logger.error("FAILURE!!! Failed to load SANITY TEST component. Verify that the test is uncommented in adapter.xml"); - System.exit(1); - } - - if (s_isRegression == true && s_componentMap.size() != 2) { - logger.error("FAILURE!!! Failed to load SANITY TEST or REGRESSION TEST components. Verify that these tests are uncommented in adapter.xml"); - System.exit(1); - } - - // put all keys from _componentMap to the ArrayList - Set set = s_componentMap.entrySet(); - Iterator it = set.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - s_keys.add(key); - } - - } - - public static boolean executeTest(String key, Class c, TestCase component) { - boolean finalResult = false; - try { - logger.info("Starting \"" + key + "\" test...\n\n"); - - // set global parameters - HashMap updateParam = new HashMap(); - updateParam.putAll(s_globalParameters); - component.setParam(updateParam); - - // set DB ip address - component.setConn(s_globalParameters.get("dbPassword")); - - // set commands list - component.setCommands(); - - // set input file - if (s_inputFile.get(key) != null) { - component.setInputFile(s_inputFile.get(key)); - } - - // set test case name - if (key != null) { - component.setTestCaseName(s_testCaseName); - } - - // execute method - s_result.set(component.executeTest()); - if (s_result.get().toString().equals("false")) { - 
logger.error("FAILURE!!! Test \"" + key + "\" failed\n\n\n"); - s_failure++; - } else { - finalResult = true; - logger.info("SUCCESS!!! Test \"" + key + "\" passed\n\n\n"); - } - - } catch (Exception ex) { - logger.error("error during test execution ", ex); - } - return finalResult; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/regression/VMApiTest.java b/test/src-not-used/main/java/com/cloud/test/regression/VMApiTest.java deleted file mode 100644 index f7ef01a2ed2..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/regression/VMApiTest.java +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.regression; - -import java.util.HashMap; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.cloud.test.regression.ApiCommand.ResponseType; - -public class VMApiTest extends TestCase { - - public VMApiTest() { - this.setClient(); - this.setParam(new HashMap()); - } - - @Override - public boolean executeTest() { - int error = 0; - Element rootElement = this.getInputFile().get(0).getDocumentElement(); - NodeList commandLst = rootElement.getElementsByTagName("command"); - - //Analyze each command, send request and build the array list of api commands - for (int i = 0; i < commandLst.getLength(); i++) { - Node fstNode = commandLst.item(i); - Element fstElmnt = (Element)fstNode; - - //new command - ApiCommand api = new ApiCommand(fstElmnt, this.getParam(), this.getCommands()); - - //send a command - api.sendCommand(this.getClient(), this.getConn()); - - //verify the response of the command - if ((api.getResponseType() == ResponseType.ERROR) && (api.getResponseCode() == 200)) { - logger.error("Test case " + api.getTestCaseInfo() + " failed. Command that was supposed to fail, passed. The command was sent with the following url " + - api.getUrl()); - error++; - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() == 200)) { - //set parameters for the future use - if (api.setParam(this.getParam()) == false) { - logger.error("Exiting the test...Command " + api.getName() + " didn't return parameters needed for the future use. The command was sent with url " + - api.getUrl()); - return false; - } - //verify parameters - if (api.verifyParam() == false) { - logger.error("Test " + api.getTestCaseInfo() + " failed. Verification for returned parameters failed. 
The command was sent with url " + - api.getUrl()); - error++; - } else { - logger.info("Test " + api.getTestCaseInfo() + " passed"); - } - } else if ((api.getResponseType() != ResponseType.ERROR) && (api.getResponseCode() != 200)) { - logger.error("Test case " + api.getTestCaseInfo() + " failed with an error code " + api.getResponseCode() + " . Command was sent with url " + - api.getUrl()); - if (api.getRequired() == true) { - logger.info("The command is required for the future use, so exiging"); - return false; - } - error++; - } else if (api.getTestCaseInfo() != null) { - logger.info("Test case " + api.getTestCaseInfo() + " passed"); - - } - } - if (error != 0) - return false; - else - return true; - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/stress/SshTest.java b/test/src-not-used/main/java/com/cloud/test/stress/SshTest.java deleted file mode 100644 index 6ebaadbffdc..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/stress/SshTest.java +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.stress; - -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import com.trilead.ssh2.Connection; -import com.trilead.ssh2.Session; - -public class SshTest { - - protected Logger logger = LogManager.getLogger(getClass()); - public static String host = ""; - public static String password = "password"; - public static String url = "http://google.com"; - - public static void main(String[] args) { - - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - if (arg.equals("-h")) { - host = iter.next(); - } - if (arg.equals("-p")) { - password = iter.next(); - } - - if (arg.equals("-u")) { - url = iter.next(); - } - } - - if (host == null || host.equals("")) { - logger.info("Did not receive a host back from test, ignoring ssh test"); - System.exit(2); - } - - if (password == null) { - logger.info("Did not receive a password back from test, ignoring ssh test"); - System.exit(2); - } - - try { - logger.info("Attempting to SSH into host " + host); - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); - - logger.info("User + ssHed successfully into host " + host); - - boolean isAuthenticated = conn.authenticateWithPassword("root", password); - - if (isAuthenticated == false) { - logger.info("Authentication failed for root with password" + password); - System.exit(2); - } - - String linuxCommand = "wget " + url; - Session sess = conn.openSession(); - sess.execCommand(linuxCommand); - sess.close(); - conn.close(); - - } catch (Exception e) { - logger.error("SSH test fail with error", e); - System.exit(2); - } - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/stress/StressTestDirectAttach.java b/test/src-not-used/main/java/com/cloud/test/stress/StressTestDirectAttach.java deleted file mode 100644 index 
d87677a6da4..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/stress/StressTestDirectAttach.java +++ /dev/null @@ -1,1354 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.stress; - -import java.io.IOException; -import java.io.InputStream; -import java.math.BigInteger; -import java.net.URLEncoder; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; - -import javax.crypto.Mac; -import javax.crypto.spec.SecretKeySpec; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpException; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.log4j.NDC; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; 
-import org.w3c.dom.NodeList; - -import com.trilead.ssh2.ChannelCondition; -import com.trilead.ssh2.Connection; -import com.trilead.ssh2.SCPClient; -import com.trilead.ssh2.Session; - -import com.cloud.utils.exception.CloudRuntimeException; - -public class StressTestDirectAttach { - private static long sleepTime = 180000L; // default 0 - private static boolean cleanUp = true; - protected Logger logger = LogManager.getLogger(getClass()); - private static boolean repeat = true; - private static String[] users = null; - private static boolean internet = false; - private static ThreadLocal s_linuxIP = new ThreadLocal(); - private static ThreadLocal s_linuxVmId = new ThreadLocal(); - private static ThreadLocal s_linuxVmId1 = new ThreadLocal(); - private static ThreadLocal s_linuxPassword = new ThreadLocal(); - private static ThreadLocal s_windowsIP = new ThreadLocal(); - private static ThreadLocal s_secretKey = new ThreadLocal(); - private static ThreadLocal s_apiKey = new ThreadLocal(); - private static ThreadLocal s_userId = new ThreadLocal(); - private static ThreadLocal s_account = new ThreadLocal(); - private static ThreadLocal s_domainRouterId = new ThreadLocal(); - private static ThreadLocal s_newVolume = new ThreadLocal(); - private static ThreadLocal s_newVolume1 = new ThreadLocal(); - private static DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - private static int usageIterator = 1; - private static int numThreads = 1; - private static int wait = 5000; - private static String accountName = null; - private static String zoneId = "1"; - private static String serviceOfferingId = "13"; - private static String diskOfferingId = "11"; - private static String diskOfferingId1 = "12"; - - private static final int MAX_RETRY_LINUX = 10; - private static final int MAX_RETRY_WIN = 10; - - public static void main(String[] args) { - String host = "http://localhost"; - String port = "8092"; - String devPort = "8080"; - String apiUrl = "/client/api"; - 
- try { - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - // host - if (arg.equals("-h")) { - host = "http://" + iter.next(); - } - - if (arg.equals("-p")) { - port = iter.next(); - } - if (arg.equals("-dp")) { - devPort = iter.next(); - } - - if (arg.equals("-t")) { - numThreads = Integer.parseInt(iter.next()); - } - - if (arg.equals("-s")) { - sleepTime = Long.parseLong(iter.next()); - } - if (arg.equals("-a")) { - accountName = iter.next(); - } - - if (arg.equals("-c")) { - cleanUp = Boolean.parseBoolean(iter.next()); - if (!cleanUp) - sleepTime = 0L; // no need to wait if we don't ever - // cleanup - } - - if (arg.equals("-r")) { - repeat = Boolean.parseBoolean(iter.next()); - } - - if (arg.equals("-i")) { - internet = Boolean.parseBoolean(iter.next()); - } - - if (arg.equals("-w")) { - wait = Integer.parseInt(iter.next()); - } - - if (arg.equals("-z")) { - zoneId = iter.next(); - } - - if (arg.equals("-so")) { - serviceOfferingId = iter.next(); - } - - } - - final String server = host + ":" + port + "/"; - final String developerServer = host + ":" + devPort + apiUrl; - logger.info("Starting test against server: " + server + " with " + numThreads + " thread(s)"); - if (cleanUp) - logger.info("Clean up is enabled, each test will wait " + sleepTime + " ms before cleaning up"); - - for (int i = 0; i < numThreads; i++) { - new Thread(new Runnable() { - @Override - public void run() { - do { - String username = null; - try { - long now = System.currentTimeMillis(); - Random ran = new Random(); - username = Math.abs(ran.nextInt()) + "-user"; - NDC.push(username); - - logger.info("Starting test for the user " + username); - int response = executeDeployment(server, developerServer, username); - boolean success = false; - String reason = null; - - if (response == 200) { - success = true; - if (internet) { - logger.info("Deploy successful...waiting 5 minute before SSH 
tests"); - Thread.sleep(300000L); // Wait 60 - // seconds so - // the windows VM - // can boot up and do a sys prep. - - logger.info("Begin Linux SSH test for account " + s_account.get()); - reason = sshTest(s_linuxIP.get(), s_linuxPassword.get()); - - if (reason == null) { - logger.info("Linux SSH test successful for account " + s_account.get()); - } - } - if (reason == null) { - if (internet) { - logger.info("Windows SSH test successful for account " + s_account.get()); - } else { - logger.info("deploy test successful....now cleaning up"); - if (cleanUp) { - logger.info("Waiting " + sleepTime + " ms before cleaning up vms"); - Thread.sleep(sleepTime); - } else { - success = true; - } - } - - if (usageIterator >= numThreads) { - int eventsAndBillingResponseCode = executeEventsAndBilling(server, developerServer); - logger.info("events and usage records command finished with response code: " + eventsAndBillingResponseCode); - usageIterator = 1; - - } else { - logger.info("Skipping events and usage records for this user: usageIterator " + usageIterator + " and number of Threads " + - numThreads); - usageIterator++; - } - - if ((users == null) && (accountName == null)) { - logger.info("Sending cleanup command"); - int cleanupResponseCode = executeCleanup(server, developerServer, username); - logger.info("cleanup command finished with response code: " + cleanupResponseCode); - success = (cleanupResponseCode == 200); - } else { - logger.info("Sending stop DomR / destroy VM command"); - int stopResponseCode = executeStop(server, developerServer, username); - logger.info("stop(destroy) command finished with response code: " + stopResponseCode); - success = (stopResponseCode == 200); - } - - } else { - // Just stop but don't destroy the - // VMs/Routers - logger.info("SSH test failed for account " + s_account.get() + "with reason '" + reason + "', stopping VMs"); - int stopResponseCode = executeStop(server, developerServer, username); - logger.info("stop command finished 
with response code: " + stopResponseCode); - success = false; // since the SSH test - // failed, mark the - // whole test as - // failure - } - } else { - // Just stop but don't destroy the - // VMs/Routers - logger.info("Deploy test failed with reason '" + reason + "', stopping VMs"); - int stopResponseCode = executeStop(server, developerServer, username); - logger.info("stop command finished with response code: " + stopResponseCode); - success = false; // since the deploy test - // failed, mark the - // whole test as failure - } - - if (success) { - logger.info("***** Completed test for user : " + username + " in " + ((System.currentTimeMillis() - now) / 1000L) + " seconds"); - - } else { - logger.info("##### FAILED test for user : " + username + " in " + ((System.currentTimeMillis() - now) / 1000L) + - " seconds with reason : " + reason); - } - logger.info("Sleeping for " + wait + " seconds before starting next iteration"); - Thread.sleep(wait); - } catch (Exception e) { - logger.warn("Error in thread", e); - try { - int stopResponseCode = executeStop(server, developerServer, username); - logger.info("stop response code: " + stopResponseCode); - } catch (Exception e1) { - logger.info("[ignored]" - + "error executing stop during stress test: " + e1.getLocalizedMessage()); - } - } finally { - NDC.clear(); - } - } while (repeat); - } - }).start(); - } - } catch (Exception e) { - logger.error(e); - } - } - - public static Map> getMultipleValuesFromXML(InputStream is, String[] tagNames) { - Map> returnValues = new HashMap>(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - logger.error("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - List valueList = new ArrayList(); - for 
(int j = 0; j < targetNodes.getLength(); j++) { - Node node = targetNodes.item(j); - valueList.add(node.getTextContent()); - } - returnValues.put(tagNames[i], valueList); - } - } - } catch (Exception ex) { - logger.error(ex); - } - return returnValues; - } - - public static Map getSingleValueFromXML(InputStream is, String[] tagNames) { - Map returnValues = new HashMap(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - logger.error("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - returnValues.put(tagNames[i], targetNodes.item(0).getTextContent()); - } - } - } catch (Exception ex) { - logger.error("error processing XML", ex); - } - return returnValues; - } - - public static Map getSingleValueFromXML(Element rootElement, String[] tagNames) { - Map returnValues = new HashMap(); - if (rootElement == null) { - logger.error("Root element is null, can't get single value from xml"); - return null; - } - try { - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - logger.error("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - returnValues.put(tagNames[i], targetNodes.item(0).getTextContent()); - } - } - } catch (Exception ex) { - logger.error("error processing XML", ex); - } - return returnValues; - } - - private static List getNonSourceNatIPs(InputStream is) { - List returnValues = new ArrayList(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - NodeList allocatedIpAddrNodes = rootElement.getElementsByTagName("publicipaddress"); - for (int i = 
0; i < allocatedIpAddrNodes.getLength(); i++) { - Node allocatedIpAddrNode = allocatedIpAddrNodes.item(i); - NodeList childNodes = allocatedIpAddrNode.getChildNodes(); - String ipAddress = null; - boolean isSourceNat = true; // assume it's source nat until we - // find otherwise - for (int j = 0; j < childNodes.getLength(); j++) { - Node n = childNodes.item(j); - if ("ipaddress".equals(n.getNodeName())) { - ipAddress = n.getTextContent(); - } else if ("issourcenat".equals(n.getNodeName())) { - isSourceNat = Boolean.parseBoolean(n.getTextContent()); - } - } - if ((ipAddress != null) && !isSourceNat) { - returnValues.add(ipAddress); - } - } - } catch (Exception ex) { - logger.error(ex); - } - return returnValues; - } - - private static List getSourceNatIPs(InputStream is) { - List returnValues = new ArrayList(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - NodeList allocatedIpAddrNodes = rootElement.getElementsByTagName("publicipaddress"); - for (int i = 0; i < allocatedIpAddrNodes.getLength(); i++) { - Node allocatedIpAddrNode = allocatedIpAddrNodes.item(i); - NodeList childNodes = allocatedIpAddrNode.getChildNodes(); - String ipAddress = null; - boolean isSourceNat = false; // assume it's *not* source nat until we find otherwise - for (int j = 0; j < childNodes.getLength(); j++) { - Node n = childNodes.item(j); - if ("ipaddress".equals(n.getNodeName())) { - ipAddress = n.getTextContent(); - } else if ("issourcenat".equals(n.getNodeName())) { - isSourceNat = Boolean.parseBoolean(n.getTextContent()); - } - } - if ((ipAddress != null) && isSourceNat) { - returnValues.add(ipAddress); - } - } - } catch (Exception ex) { - logger.error(ex); - } - return returnValues; - } - - private static String executeRegistration(String server, String username, String password) throws HttpException, IOException { - String url = server + "?command=registerUserKeys&id=" + 
s_userId.get().toString(); - logger.info("registering: " + username); - String returnValue = null; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map requestKeyValues = getSingleValueFromXML(is, new String[] {"apikey", "secretkey"}); - s_apiKey.set(requestKeyValues.get("apikey")); - returnValue = requestKeyValues.get("secretkey"); - } else { - logger.error("registration failed with error code: " + responseCode); - } - return returnValue; - } - - private static Integer executeDeployment(String server, String developerServer, String username) throws HttpException, IOException { - // test steps: - // - create user - // - deploy Windows VM - // - deploy Linux VM - // - associate IP address - // - create two IP forwarding rules - // - create load balancer rule - // - list IP forwarding rules - // - list load balancer rules - - // ----------------------------- - // CREATE USER - // ----------------------------- - String encodedUsername = URLEncoder.encode(username, "UTF-8"); - String encryptedPassword = createMD5Password(username); - String encodedPassword = URLEncoder.encode(encryptedPassword, "UTF-8"); - - String url = - server + "?command=createUser&username=" + encodedUsername + "&password=" + encodedPassword + - "&firstname=Test&lastname=Test&email=test@vmops.com&domainId=1&accounttype=0"; - if (accountName != null) { - url = - server + "?command=createUser&username=" + encodedUsername + "&password=" + encodedPassword + - "&firstname=Test&lastname=Test&email=test@vmops.com&domainId=1&accounttype=0&account=" + accountName; - } - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - long userId = -1; - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userIdValues = 
getSingleValueFromXML(is, new String[] {"id", "account"}); - String userIdStr = userIdValues.get("id"); - logger.info("created user " + username + " with id " + userIdStr); - if (userIdStr != null) { - userId = Long.parseLong(userIdStr); - s_userId.set(userId); - s_account.set(userIdValues.get("account")); - if (userId == -1) { - logger.error("create user (" + username + ") failed to retrieve a valid user id, aborting depolyment test"); - return -1; - } - } - } else { - logger.error("create user test failed for user " + username + " with error code :" + responseCode); - return responseCode; - } - - s_secretKey.set(executeRegistration(server, username, username)); - - if (s_secretKey.get() == null) { - logger.error("FAILED to retrieve secret key during registration, skipping user: " + username); - return -1; - } else { - logger.info("got secret key: " + s_secretKey.get()); - logger.info("got api key: " + s_apiKey.get()); - } - - // --------------------------------- - // CREATE NETWORK GROUP AND ADD INGRESS RULE TO IT - // --------------------------------- - String networkAccount = null; - if (accountName != null) { - networkAccount = accountName; - } else { - networkAccount = encodedUsername; - } - String encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey + "&command=createSecurityGroup&name=" + encodedUsername; - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, s_secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - url = developerServer + "?command=createSecurityGroup&name=" + encodedUsername + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map values = getSingleValueFromXML(is, new String[] {"id"}); - - if (values.get("id") == null) { - 
logger.info("Create network rule response code: 401"); - return 401; - } else { - logger.info("Create security group response code: " + responseCode); - } - } else { - logger.error("Create security group failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - String encodedCidr = URLEncoder.encode("192.168.1.143/32", "UTF-8"); - url = - server + "?command=authorizeSecurityGroupIngress&cidrlist=" + encodedCidr + "&endport=22&" + "securitygroupname=" + encodedUsername + - "&protocol=tcp&startport=22&account=" + networkAccount + "&domainid=1"; - - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - - if (values.get("id") == null) { - logger.info("Authorise security group ingress response code: 401"); - return 401; - } else { - logger.info("Authorise security group ingress response code: " + responseCode); - } - } else { - logger.error("Authorise security group ingress failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // --------------------------------- - // DEPLOY LINUX VM - // --------------------------------- - { - long templateId = 2; - String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); - String encodedServiceOfferingId = URLEncoder.encode("" + serviceOfferingId, "UTF-8"); - String encodedTemplateId = URLEncoder.encode("" + templateId, "UTF-8"); - encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - requestToSign = - "apikey=" + encodedApiKey + "&command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&serviceofferingid=" + encodedServiceOfferingId + - "&templateid=" + encodedTemplateId + "&zoneid=" + encodedZoneId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - url = - developerServer + "?command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&zoneid=" + encodedZoneId + "&serviceofferingid=" + - encodedServiceOfferingId + "&templateid=" + encodedTemplateId + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id", "ipaddress"}); - - if ((values.get("ipaddress") == null) || (values.get("id") == null)) { - logger.info("deploy linux vm response code: 401"); - return 401; - } else { - logger.info("deploy linux vm response code: " + responseCode); - long linuxVMId = Long.parseLong(values.get("id")); - logger.info("got linux virtual machine id: " + linuxVMId); - s_linuxVmId.set(values.get("id")); - s_linuxIP.set(values.get("ipaddress")); - s_linuxPassword.set("rs-ccb35ea5"); - } - } else { - logger.error("deploy linux vm failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - //Create a new volume - { - url = server + "?command=createVolume&diskofferingid=" + diskOfferingId + "&zoneid=" + zoneId + "&name=newvolume&account=" + s_account.get() + "&domainid=1"; - logger.info("Creating volume...."); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - - if (values.get("id") == null) { - logger.info("create volume response code: 401"); - return 401; - } else { - logger.info("create volume response code: " + responseCode); - String volumeId = values.get("id"); - logger.info("got volume id: " + volumeId); - s_newVolume.set(volumeId); - } - } else { - logger.error("create volume failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - //attach a new volume to the vm - { - url = server + "?command=attachVolume&id=" + s_newVolume.get() + "&virtualmachineid=" + s_linuxVmId.get(); - logger.info("Attaching volume with id " + s_newVolume.get() + " to the vm " + s_linuxVmId.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Attach data volume response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - - if (values.get("id") == null) { - logger.info("Attach volume response code: 401"); - return 401; - } else { - logger.info("Attach volume response code: " + responseCode); - } - } else { - logger.error("Attach volume failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - //DEPLOY SECOND VM, ADD VOLUME TO IT - - // --------------------------------- - // DEPLOY another linux vm - // --------------------------------- - { - long templateId = 2; - String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); - String encodedServiceOfferingId = URLEncoder.encode("" + serviceOfferingId, "UTF-8"); - String encodedTemplateId = URLEncoder.encode("" + templateId, "UTF-8"); - encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - requestToSign = - "apikey=" + encodedApiKey + "&command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&serviceofferingid=" + encodedServiceOfferingId + - "&templateid=" + encodedTemplateId + "&zoneid=" + encodedZoneId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - url = - developerServer + "?command=deployVirtualMachine&securitygrouplist=" + encodedUsername + "&zoneid=" + encodedZoneId + "&serviceofferingid=" + - encodedServiceOfferingId + "&templateid=" + encodedTemplateId + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id", "ipaddress"}); - - if ((values.get("ipaddress") == null) || (values.get("id") == null)) { - logger.info("deploy linux vm response code: 401"); - return 401; - } else { - logger.info("deploy linux vm response code: " + responseCode); - long linuxVMId = Long.parseLong(values.get("id")); - logger.info("got linux virtual machine id: " + linuxVMId); - s_linuxVmId1.set(values.get("id")); - } - } else { - logger.error("deploy linux vm failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - //Create a new volume - { - url = server + "?command=createVolume&diskofferingid=" + diskOfferingId1 + "&zoneid=" + zoneId + "&name=newvolume1&account=" + s_account.get() + "&domainid=1"; - logger.info("Creating volume...."); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - - if (values.get("id") == null) { - logger.info("create volume response code: 401"); - return 401; - } else { - logger.info("create volume response code: " + responseCode); - String volumeId = values.get("id"); - logger.info("got volume id: " + volumeId); - s_newVolume1.set(volumeId); - } - } else { - logger.error("create volume failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - //attach a new volume to the vm - { - url = server + "?command=attachVolume&id=" + s_newVolume1.get() + "&virtualmachineid=" + s_linuxVmId1.get(); - logger.info("Attaching volume with id " + s_newVolume1.get() + " to the vm " + s_linuxVmId1.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Attach data volume response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - - if (values.get("id") == null) { - logger.info("Attach volume response code: 401"); - return 401; - } else { - logger.info("Attach volume response code: " + responseCode); - } - } else { - logger.error("Attach volume failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - return 200; - } - - private static int executeCleanup(String server, String developerServer, String username) throws HttpException, IOException { - // test steps: - // - get user - // - delete user - - // ----------------------------- - // GET USER - // ----------------------------- - String userId = s_userId.get().toString(); - String encodedUserId = URLEncoder.encode(userId, "UTF-8"); - String url = server + "?command=listUsers&id=" + encodedUserId; - logger.info("Cleaning up resources for user: " + userId + " with url " + url); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - logger.info("get user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userInfo = getSingleValueFromXML(is, new String[] {"username", "id", "account"}); - if (!username.equals(userInfo.get("username"))) { - logger.error("get user failed to retrieve requested user, aborting cleanup test" + ". Following URL was sent: " + url); - return -1; - } - - } else { - logger.error("get user failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - // ----------------------------- - // UPDATE USER - // ----------------------------- - { - url = server + "?command=updateUser&id=" + userId + "&firstname=delete&lastname=me"; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("update user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] {"success"}); - logger.info("update user..success? " + success.get("success")); - } else { - logger.error("update user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - // ----------------------------- - // Execute reboot/stop/start commands for the VMs before deleting the account - made to exercise xen - // ----------------------------- - - //Reboot centos VM - String encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey + "&command=rebootVirtualMachine&id=" + s_linuxVmId.get(); - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, s_secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=rebootVirtualMachine&id=" + s_linuxVmId.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Reboot VM response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, new String[] {"success"}); - logger.info("VM was rebooted with the status: " + success.get("success")); - } else { - logger.error(" VM test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - //Stop centos VM - requestToSign = "apikey=" + encodedApiKey + "&command=stopVirtualMachine&id=" + s_linuxVmId.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=stopVirtualMachine&id=" + s_linuxVmId.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Stop VM response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, new String[] {"success"}); - logger.info("VM was stopped with the status: " + success.get("success")); - } else { - logger.error("Stop VM test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - //Start centos VM - requestToSign = "apikey=" + encodedApiKey + "&command=startVirtualMachine&id=" + s_linuxVmId.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=startVirtualMachine&id=" + s_linuxVmId.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Start VM response code: " + responseCode); - - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, new String[] {"id"}); - - if (success.get("id") == null) { - logger.info("Start linux vm response code: 401"); - return 401; - } else { - logger.info("Start vm response code: " + responseCode); - } - - logger.info("VM was started with the status: " + success.get("success")); - } else { - logger.error("Start VM test failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - -//// // ----------------------------- -//// // DISABLE USER -//// // ----------------------------- -// { -// url = server + "?command=disableUser&id=" + userId; -// client = new HttpClient(); -// method = new GetMethod(url); -// responseCode = client.executeMethod(method); -// logger.info("disable user response code: " + responseCode); -// if (responseCode == 200) { -// InputStream input = method.getResponseBodyAsStream(); -// Element el = queryAsyncJobResult(server, input); -// logger -// .info("Disabled user successfully"); -// } else { -// logger.error("disable user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); -// return responseCode; -// } -// } - - // ----------------------------- - // DELETE USER - // ----------------------------- - { - url = server + "?command=deleteUser&id=" + userId; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("delete user response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - logger.info("Deleted user successfully"); - } else { - logger.error("delete user failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - return responseCode; - } - - private static int executeEventsAndBilling(String server, String developerServer) throws HttpException, IOException { - // test steps: - // - get all the events in the system for all users in the system - // - generate all the usage records in the system - // - get all the usage records in the system - - // ----------------------------- - // GET EVENTS - // ----------------------------- - String url = server + "?command=listEvents&page=1&account=" + s_account.get(); - - logger.info("Getting events for the account " + s_account.get()); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - logger.info("get events response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> eventDescriptions = getMultipleValuesFromXML(is, new String[] {"description"}); - List descriptionText = eventDescriptions.get("description"); - if (descriptionText == null) { - logger.info("no events retrieved..."); - } else { - for (String text : descriptionText) { - logger.info("event: " + text); - } - } - } else { - logger.error("list events failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - - return responseCode; - } - return responseCode; - } - - private static int executeStop(String server, String developerServer, String username) throws HttpException, IOException { - // test steps: - // - get userId for the given username - // - list virtual machines for the user - // - stop all virtual machines - // - get ip addresses for the user - // - release ip addresses - - // ----------------------------- - // GET USER - // ----------------------------- - String userId = s_userId.get().toString(); - String encodedUserId = URLEncoder.encode(userId, "UTF-8"); - - String url = server + "?command=listUsers&id=" + encodedUserId; - logger.info("Stopping resources for user: " + username); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - logger.info("get user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userIdValues = getSingleValueFromXML(is, new String[] {"id"}); - String userIdStr = userIdValues.get("id"); - if (userIdStr != null) { - userId = userIdStr; - if (userId == null) { - logger.error("get user failed to retrieve a valid user id, aborting depolyment test" + ". Following URL was sent: " + url); - return -1; - } - } - } else { - logger.error("get user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - { - // ---------------------------------- - // LIST VIRTUAL MACHINES - // ---------------------------------- - String encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey + "&command=listVirtualMachines"; - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, s_secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listVirtualMachines&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - - logger.info("Listing all virtual machines for the user with url " + url); - String[] vmIds = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("list virtual machines response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> vmIdValues = getMultipleValuesFromXML(is, new String[] {"id"}); - if (vmIdValues.containsKey("id")) { - List vmIdList = vmIdValues.get("id"); - if (vmIdList != null) { - vmIds = new String[vmIdList.size()]; - vmIdList.toArray(vmIds); - String vmIdLogStr = ""; - if ((vmIds != null) && (vmIds.length > 0)) { - vmIdLogStr = vmIds[0]; - for (int i = 1; i < vmIds.length; i++) { - vmIdLogStr = vmIdLogStr + "," + vmIds[i]; - } - } - logger.info("got virtual machine ids: " + vmIdLogStr); - } - } - - } else { - logger.error("list virtual machines test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ---------------------------------- - // STOP/DESTROY VIRTUAL MACHINES - // ---------------------------------- - if (vmIds != null) { - for (String vmId : vmIds) { - requestToSign = "apikey=" + encodedApiKey + "&command=stopVirtualMachine&id=" + vmId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=stopVirtualMachine&id=" + vmId + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("StopVirtualMachine" + " [" + vmId + "] response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, new String[] {"success"}); - logger.info("StopVirtualMachine..success? " + success.get("success")); - } else { - logger.error("Stop virtual machine test failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - } - -// { -// url = server + "?command=deleteUser&id=" + userId; -// client = new HttpClient(); -// method = new GetMethod(url); -// responseCode = client.executeMethod(method); -// logger.info("delete user response code: " + responseCode); -// if (responseCode == 200) { -// InputStream input = method.getResponseBodyAsStream(); -// Element el = queryAsyncJobResult(server, input); -// logger -// .info("Deleted user successfully"); -// } else { -// logger.error("delete user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); -// return responseCode; -// } -// } - - } - - s_linuxIP.set(""); - s_linuxVmId.set(""); - s_linuxPassword.set(""); - s_windowsIP.set(""); - s_secretKey.set(""); - s_apiKey.set(""); - s_userId.set(Long.parseLong("0")); - s_account.set(""); - s_domainRouterId.set(""); - return responseCode; - } - - public static String signRequest(String request, String key) { - try { - Mac mac = Mac.getInstance("HmacSHA1"); - SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), "HmacSHA1"); - mac.init(keySpec); - mac.update(request.getBytes()); - byte[] encryptedBytes = mac.doFinal(); - return Base64.encodeBase64String(encryptedBytes); - } catch (Exception ex) { - logger.error("unable to sign request", ex); - } - return null; - } - - private static String sshWinTest(String host) { - if (host == null) { - logger.info("Did not receive a host back from test, ignoring win ssh test"); - return null; - } - - // We will retry 5 times before quitting - int retry = 1; - - while (true) { - try { - if (retry > 0) { - logger.info("Retry attempt : " + retry + " ...sleeping 300 seconds before next attempt. 
Account is " + s_account.get()); - Thread.sleep(300000); - } - - logger.info("Attempting to SSH into windows host " + host + " with retry attempt: " + retry + " for account " + s_account.get()); - - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); - - logger.info("User " + s_account.get() + " ssHed successfully into windows host " + host); - boolean success = false; - boolean isAuthenticated = conn.authenticateWithPassword("Administrator", "password"); - if (isAuthenticated == false) { - return "Authentication failed"; - } else { - logger.info("Authentication is successful"); - } - - try { - SCPClient scp = new SCPClient(conn); - scp.put("wget.exe", "wget.exe", "C:\\Users\\Administrator", "0777"); - logger.info("Successfully put wget.exe file"); - } catch (Exception ex) { - logger.error("Unable to put wget.exe " + ex); - } - - if (conn == null) { - logger.error("Connection is null"); - } - Session sess = conn.openSession(); - - logger.info("User + " + s_account.get() + " executing : wget http://192.168.1.250/dump.bin"); - sess.execCommand("wget http://192.168.1.250/dump.bin && dir dump.bin"); - - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); - - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() == 0)) { - int conditions = sess.waitForCondition(ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA | ChannelCondition.EOF, 120000); - - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - logger.info("Timeout while waiting for data from peer."); - return null; - } - - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } - - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - logger.info(new String(buffer, 0, len)); - } - - while (stderr.available() > 0) { - /* 
int len = */stderr.read(buffer); - } - } - sess.close(); - conn.close(); - - if (success) { - Thread.sleep(120000); - return null; - } else { - retry++; - if (retry == MAX_RETRY_WIN) { - return "SSH Windows Network test fail for account " + s_account.get(); - } - } - } catch (Exception e) { - logger.error(e); - retry++; - if (retry == MAX_RETRY_WIN) { - return "SSH Windows Network test fail with error " + e.getMessage(); - } - } - } - } - - private static String sshTest(String host, String password) { - int i = 0; - if (host == null) { - logger.info("Did not receive a host back from test, ignoring ssh test"); - return null; - } - - if (password == null) { - logger.info("Did not receive a password back from test, ignoring ssh test"); - return null; - } - - // We will retry 5 times before quitting - String result = null; - int retry = 0; - - while (true) { - try { - if (retry > 0) { - logger.info("Retry attempt : " + retry + " ...sleeping 120 seconds before next attempt. Account is " + s_account.get()); - Thread.sleep(120000); - } - - logger.info("Attempting to SSH into linux host " + host + " with retry attempt: " + retry + ". 
Account is " + s_account.get()); - - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); - - logger.info("User + " + s_account.get() + " ssHed successfully into linux host " + host); - - boolean isAuthenticated = conn.authenticateWithPassword("root", password); - - if (isAuthenticated == false) { - logger.info("Authentication failed for root with password" + password); - return "Authentication failed"; - - } - - boolean success = false; - String linuxCommand = null; - - if (i % 10 == 0) - linuxCommand = "rm -rf *; wget http://192.168.1.250/dump.bin && ls -al dump.bin"; - else - linuxCommand = "wget http://192.168.1.250/dump.bin && ls -al dump.bin"; - - Session sess = conn.openSession(); - logger.info("User " + s_account.get() + " executing : " + linuxCommand); - sess.execCommand(linuxCommand); - - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); - - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() == 0)) { - int conditions = sess.waitForCondition(ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA | ChannelCondition.EOF, 120000); - - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - logger.info("Timeout while waiting for data from peer."); - return null; - } - - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } - - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - logger.info(new String(buffer, 0, len)); - } - - while (stderr.available() > 0) { - /* int len = */stderr.read(buffer); - } - } - - sess.close(); - conn.close(); - - if (!success) { - retry++; - if (retry == MAX_RETRY_LINUX) { - result = "SSH Linux Network test fail"; - } - } - - return result; - } catch (Exception e) { - retry++; - logger.error("SSH Linux Network test fail with error"); - if 
(retry == MAX_RETRY_LINUX) { - return "SSH Linux Network test fail with error " + e.getMessage(); - } - } - i++; - } - } - - public static String createMD5Password(String password) { - MessageDigest md5; - - try { - md5 = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new CloudRuntimeException("Error", e); - } - - md5.reset(); - BigInteger pwInt = new BigInteger(1, md5.digest(password.getBytes())); - - // make sure our MD5 hash value is 32 digits long... - StringBuffer sb = new StringBuffer(); - String pwStr = pwInt.toString(16); - int padding = 32 - pwStr.length(); - for (int i = 0; i < padding; i++) { - sb.append('0'); - } - sb.append(pwStr); - return sb.toString(); - } - - public static Element queryAsyncJobResult(String host, InputStream inputStream) { - Element returnBody = null; - - Map values = getSingleValueFromXML(inputStream, new String[] {"jobid"}); - String jobId = values.get("jobid"); - - if (jobId == null) { - logger.error("Unable to get a jobId"); - return null; - } - - //logger.info("Job id is " + jobId); - String resultUrl = host + "?command=queryAsyncJobResult&jobid=" + jobId; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(resultUrl); - while (true) { - try { - client.executeMethod(method); - //logger.info("Method is executed successfully. 
Following url was sent " + resultUrl); - InputStream is = method.getResponseBodyAsStream(); - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(is); - returnBody = doc.getDocumentElement(); - doc.getDocumentElement().normalize(); - Element jobStatusTag = (Element)returnBody.getElementsByTagName("jobstatus").item(0); - String jobStatus = jobStatusTag.getTextContent(); - if (jobStatus.equals("0")) { - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - logger.debug("[ignored] interrupted while during async job result query."); - } - } else { - break; - } - - } catch (Exception ex) { - logger.error(ex); - } - } - return returnBody; - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/stress/TestClientWithAPI.java b/test/src-not-used/main/java/com/cloud/test/stress/TestClientWithAPI.java deleted file mode 100644 index 3bb65a3f155..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/stress/TestClientWithAPI.java +++ /dev/null @@ -1,2290 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.stress; - -import java.io.IOException; -import java.io.InputStream; -import java.math.BigInteger; -import java.net.URLEncoder; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.text.DateFormat; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Date; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; - -import javax.crypto.Mac; -import javax.crypto.spec.SecretKeySpec; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpException; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.log4j.NDC; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.trilead.ssh2.ChannelCondition; -import com.trilead.ssh2.Connection; -import com.trilead.ssh2.SCPClient; -import com.trilead.ssh2.Session; - -import com.cloud.utils.exception.CloudRuntimeException; - -public class TestClientWithAPI { - private static long sleepTime = 180000L; // default 0 - private static boolean cleanUp = true; - protected Logger logger = LogManager.getLogger(getClass()); - private static boolean repeat = true; - private static int numOfUsers = 0; - private static String[] users = null; - private static boolean internet = false; - private static ThreadLocal s_linuxIP = new ThreadLocal(); - private static ThreadLocal s_linuxIpId = new ThreadLocal(); - private static ThreadLocal s_linuxVmId = new ThreadLocal(); - private static ThreadLocal s_linuxPassword = new ThreadLocal(); - private static ThreadLocal s_windowsIP = new ThreadLocal(); - 
private static ThreadLocal s_windowsIpId = new ThreadLocal(); - private static ThreadLocal s_windowsVmId = new ThreadLocal(); - private static ThreadLocal s_secretKey = new ThreadLocal(); - private static ThreadLocal s_apiKey = new ThreadLocal(); - private static ThreadLocal s_userId = new ThreadLocal(); - private static ThreadLocal s_accountId = new ThreadLocal(); - private static ThreadLocal s_account = new ThreadLocal(); - private static ThreadLocal s_domainRouterId = new ThreadLocal(); - private static ThreadLocal s_pfGroupId = new ThreadLocal(); - private static ThreadLocal s_windowsLb = new ThreadLocal(); - private static ThreadLocal s_linuxLb = new ThreadLocal(); - private static ThreadLocal s_dataVolume = new ThreadLocal(); - private static ThreadLocal s_rootVolume = new ThreadLocal(); - private static ThreadLocal s_newVolume = new ThreadLocal(); - private static ThreadLocal s_snapshot = new ThreadLocal(); - private static ThreadLocal s_volumeFromSnapshot = new ThreadLocal(); - private static ThreadLocal s_networkId = new ThreadLocal(); - private static ThreadLocal s_publicIpId = new ThreadLocal(); - private static ThreadLocal s_winipfwdid = new ThreadLocal(); - private static ThreadLocal s_linipfwdid = new ThreadLocal(); - private static DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - private static int usageIterator = 1; - private static int numThreads = 1; - private static int wait = 5000; - private static String accountName = null; - private static String zoneId = "1"; - private static String snapshotTest = "no"; - private static String serviceOfferingId = "1"; - private static String diskOfferingId = "4"; - private static String networkOfferingId = "6"; - private static String vmPassword = "rs-ccb35ea5"; - private static String downloadUrl = "192.168.1.250/dump.bin"; - - private static final int MAX_RETRY_LINUX = 10; - private static final int MAX_RETRY_WIN = 10; - - public static void main(String[] args) { - String host = 
"http://localhost"; - String port = "8092"; - String devPort = "8080"; - String apiUrl = "/client/api"; - - try { - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - // host - if (arg.equals("-h")) { - host = "http://" + iter.next(); - } - - if (arg.equals("-p")) { - port = iter.next(); - } - if (arg.equals("-dp")) { - devPort = iter.next(); - } - - if (arg.equals("-t")) { - numThreads = Integer.parseInt(iter.next()); - } - - if (arg.equals("-s")) { - sleepTime = Long.parseLong(iter.next()); - } - if (arg.equals("-a")) { - accountName = iter.next(); - } - - if (arg.equals("-c")) { - cleanUp = Boolean.parseBoolean(iter.next()); - if (!cleanUp) - sleepTime = 0L; // no need to wait if we don't ever - // cleanup - } - - if (arg.equals("-r")) { - repeat = Boolean.parseBoolean(iter.next()); - } - - if (arg.equals("-u")) { - numOfUsers = Integer.parseInt(iter.next()); - } - - if (arg.equals("-i")) { - internet = Boolean.parseBoolean(iter.next()); - } - - if (arg.equals("-w")) { - wait = Integer.parseInt(iter.next()); - } - - if (arg.equals("-z")) { - zoneId = iter.next(); - } - - if (arg.equals("-snapshot")) { - snapshotTest = "yes"; - } - - if (arg.equals("-so")) { - serviceOfferingId = iter.next(); - } - - if (arg.equals("-do")) { - diskOfferingId = iter.next(); - } - - if (arg.equals("-no")) { - networkOfferingId = iter.next(); - } - - if (arg.equals("-pass")) { - vmPassword = iter.next(); - } - - if (arg.equals("-url")) { - downloadUrl = iter.next(); - } - - } - - final String server = host + ":" + port + "/"; - final String developerServer = host + ":" + devPort + apiUrl; - logger.info("Starting test against server: " + server + " with " + numThreads + " thread(s)"); - if (cleanUp) - logger.info("Clean up is enabled, each test will wait " + sleepTime + " ms before cleaning up"); - - if (numOfUsers > 0) { - logger.info("Pre-generating users for test of size : " + 
numOfUsers); - users = new String[numOfUsers]; - Random ran = new Random(); - for (int i = 0; i < numOfUsers; i++) { - users[i] = Math.abs(ran.nextInt()) + "-user"; - } - } - - for (int i = 0; i < numThreads; i++) { - new Thread(new Runnable() { - @Override - public void run() { - do { - String username = null; - try { - long now = System.currentTimeMillis(); - Random ran = new Random(); - if (users != null) { - username = users[Math.abs(ran.nextInt()) % numOfUsers]; - } else { - username = Math.abs(ran.nextInt()) + "-user"; - } - NDC.push(username); - - logger.info("Starting test for the user " + username); - int response = executeDeployment(server, developerServer, username, snapshotTest); - boolean success = false; - String reason = null; - - if (response == 200) { - success = true; - if (internet) { - logger.info("Deploy successful...waiting 5 minute before SSH tests"); - Thread.sleep(300000L); // Wait 60 - // seconds so - // the windows VM - // can boot up and do a sys prep. - - if (accountName == null) { - logger.info("Begin Linux SSH test for account " + s_account.get()); - reason = sshTest(s_linuxIP.get(), s_linuxPassword.get(), snapshotTest); - } - - if (reason == null) { - logger.info("Linux SSH test successful for account " + s_account.get()); - logger.info("Begin WindowsSSH test for account " + s_account.get()); - - reason = sshTest(s_linuxIP.get(), s_linuxPassword.get(), snapshotTest); - // reason = sshWinTest(s_windowsIP.get()); - } - - // release the linux IP now... 
- s_linuxIP.set(null); - // release the Windows IP now - s_windowsIP.set(null); - } - - // sleep for 3 min before getting the latest network stat - // logger.info("Sleeping for 5 min before getting the lates network stat for the account"); - // Thread.sleep(300000); - // verify that network stat is correct for the user; if it's not - stop all the resources - // for the user - // if ((reason == null) && (getNetworkStat(server) == false) ) { - // logger.error("Stopping all the resources for the account " + s_account.get() + - // " as network stat is incorrect"); - // int stopResponseCode = executeStop( - // server, developerServer, - // username, false); - // logger - // .info("stop command finished with response code: " - // + stopResponseCode); - // success = false; // since the SSH test - // - // } else - if (reason == null) { - if (internet) { - logger.info("Windows SSH test successful for account " + s_account.get()); - } else { - logger.info("deploy test successful....now cleaning up"); - if (cleanUp) { - logger.info("Waiting " + sleepTime + " ms before cleaning up vms"); - Thread.sleep(sleepTime); - } else { - success = true; - } - } - - if (usageIterator >= numThreads) { - int eventsAndBillingResponseCode = executeEventsAndBilling(server, developerServer); - logger.info("events and usage records command finished with response code: " + eventsAndBillingResponseCode); - usageIterator = 1; - - } else { - logger.info("Skipping events and usage records for this user: usageIterator " + usageIterator + " and number of Threads " + - numThreads); - usageIterator++; - } - - if ((users == null) && (accountName == null)) { - logger.info("Sending cleanup command"); - int cleanupResponseCode = executeCleanup(server, developerServer, username); - logger.info("cleanup command finished with response code: " + cleanupResponseCode); - success = (cleanupResponseCode == 200); - } else { - logger.info("Sending stop DomR / destroy VM command"); - int stopResponseCode = 
executeStop(server, developerServer, username, true); - logger.info("stop(destroy) command finished with response code: " + stopResponseCode); - success = (stopResponseCode == 200); - } - - } else { - // Just stop but don't destroy the - // VMs/Routers - logger.info("SSH test failed for account " + s_account.get() + "with reason '" + reason + "', stopping VMs"); - int stopResponseCode = executeStop(server, developerServer, username, false); - logger.info("stop command finished with response code: " + stopResponseCode); - success = false; // since the SSH test - // failed, mark the - // whole test as - // failure - } - } else { - // Just stop but don't destroy the - // VMs/Routers - logger.info("Deploy test failed with reason '" + reason + "', stopping VMs"); - int stopResponseCode = executeStop(server, developerServer, username, true); - logger.info("stop command finished with response code: " + stopResponseCode); - success = false; // since the deploy test - // failed, mark the - // whole test as failure - } - - if (success) { - logger.info("***** Completed test for user : " + username + " in " + ((System.currentTimeMillis() - now) / 1000L) + " seconds"); - - } else { - logger.info("##### FAILED test for user : " + username + " in " + ((System.currentTimeMillis() - now) / 1000L) + - " seconds with reason : " + reason); - } - logger.info("Sleeping for " + wait + " seconds before starting next iteration"); - Thread.sleep(wait); - } catch (Exception e) { - logger.warn("Error in thread", e); - try { - int stopResponseCode = executeStop(server, developerServer, username, true); - logger.info("stop response code: " + stopResponseCode); - } catch (Exception e1) { - logger.info("[ignored]" - + "error executing stop during api test: " + e1.getLocalizedMessage()); - } - } finally { - NDC.clear(); - } - } while (repeat); - } - }).start(); - } - } catch (Exception e) { - logger.error(e); - } - } - - public static Map> getMultipleValuesFromXML(InputStream is, String[] 
tagNames) { - Map> returnValues = new HashMap>(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - logger.error("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - List valueList = new ArrayList(); - for (int j = 0; j < targetNodes.getLength(); j++) { - Node node = targetNodes.item(j); - valueList.add(node.getTextContent()); - } - returnValues.put(tagNames[i], valueList); - } - } - } catch (Exception ex) { - logger.error(ex); - } - return returnValues; - } - - public static Map getSingleValueFromXML(InputStream is, String[] tagNames) { - Map returnValues = new HashMap(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - logger.error("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - returnValues.put(tagNames[i], targetNodes.item(0).getTextContent()); - } - } - } catch (Exception ex) { - logger.error("error processing XML", ex); - } - return returnValues; - } - - public static Map getSingleValueFromXML(Element rootElement, String[] tagNames) { - Map returnValues = new HashMap(); - if (rootElement == null) { - logger.error("Root element is null, can't get single value from xml"); - return null; - } - try { - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - logger.error("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - returnValues.put(tagNames[i], 
targetNodes.item(0).getTextContent()); - } - } - } catch (Exception ex) { - logger.error("error processing XML", ex); - } - return returnValues; - } - - private static List getNonSourceNatIPs(InputStream is) { - List returnValues = new ArrayList(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - NodeList allocatedIpAddrNodes = rootElement.getElementsByTagName("publicipaddress"); - for (int i = 0; i < allocatedIpAddrNodes.getLength(); i++) { - Node allocatedIpAddrNode = allocatedIpAddrNodes.item(i); - NodeList childNodes = allocatedIpAddrNode.getChildNodes(); - String ipAddress = null; - boolean isSourceNat = true; // assume it's source nat until we - // find otherwise - for (int j = 0; j < childNodes.getLength(); j++) { - Node n = childNodes.item(j); - if ("id".equals(n.getNodeName())) { - // if ("ipaddress".equals(n.getNodeName())) { - ipAddress = n.getTextContent(); - } else if ("issourcenat".equals(n.getNodeName())) { - isSourceNat = Boolean.parseBoolean(n.getTextContent()); - } - } - if ((ipAddress != null) && !isSourceNat) { - returnValues.add(ipAddress); - } - } - } catch (Exception ex) { - logger.error(ex); - } - return returnValues; - } - - private static List getIPs(InputStream is, boolean sourceNat) { - List returnValues = new ArrayList(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - NodeList allocatedIpAddrNodes = rootElement.getElementsByTagName("publicipaddress"); - for (int i = 0; i < allocatedIpAddrNodes.getLength(); i++) { - Node allocatedIpAddrNode = allocatedIpAddrNodes.item(i); - NodeList childNodes = allocatedIpAddrNode.getChildNodes(); - String ipAddress = null; - String ipAddressId = null; - boolean isSourceNat = false; // assume it's *not* source nat until we find otherwise - for (int j = 0; j < childNodes.getLength(); 
j++) { - Node n = childNodes.item(j); - //Id is being used instead of ipaddress. Changes need to done later to ipaddress variable - if ("id".equals(n.getNodeName())) { - ipAddressId = n.getTextContent(); - } else if ("ipaddress".equals(n.getNodeName())) { - ipAddress = n.getTextContent(); - } else if ("issourcenat".equals(n.getNodeName())) { - isSourceNat = Boolean.parseBoolean(n.getTextContent()); - } - } - if ((ipAddress != null) && isSourceNat == sourceNat) { - returnValues.add(ipAddressId); - returnValues.add(ipAddress); - } - } - } catch (Exception ex) { - logger.error(ex); - } - return returnValues; - } - - private static String executeRegistration(String server, String username, String password) throws HttpException, IOException { - String url = server + "?command=registerUserKeys&id=" + s_userId.get().toString(); - logger.info("registering: " + username); - String returnValue = null; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map requestKeyValues = getSingleValueFromXML(is, new String[] {"apikey", "secretkey"}); - s_apiKey.set(requestKeyValues.get("apikey")); - returnValue = requestKeyValues.get("secretkey"); - } else { - logger.error("registration failed with error code: " + responseCode); - } - return returnValue; - } - - private static Integer executeDeployment(String server, String developerServer, String username, String snapshotTest) throws HttpException, IOException { - // test steps: - // - create user - // - deploy Windows VM - // - deploy Linux VM - // - associate IP address - // - create two IP forwarding rules - // - create load balancer rule - // - list IP forwarding rules - // - list load balancer rules - - // ----------------------------- - // CREATE ACCOUNT - // ----------------------------- - String encodedUsername = URLEncoder.encode(username, "UTF-8"); - String 
encryptedPassword = createMD5Password(username); - String encodedPassword = URLEncoder.encode(encryptedPassword, "UTF-8"); - - String url = - server + "?command=createAccount&username=" + encodedUsername + "&account=" + encodedUsername + "&password=" + encodedPassword + - "&firstname=Test&lastname=Test&email=test@vmops.com&domainId=1&accounttype=0"; - - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - long accountId = -1; - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map accountValues = getSingleValueFromXML(is, new String[] {"id", "name"}); - String accountIdStr = accountValues.get("id"); - logger.info("created account " + username + " with id " + accountIdStr); - if (accountIdStr != null) { - accountId = Long.parseLong(accountIdStr); - s_accountId.set(accountId); - s_account.set(accountValues.get("name")); - if (accountId == -1) { - logger.error("create account (" + username + ") failed to retrieve a valid user id, aborting depolyment test"); - return -1; - } - } - } else { - logger.error("create account test failed for account " + username + " with error code :" + responseCode + - ", aborting deployment test. 
The command was sent with url " + url); - return -1; - } - - // LIST JUST CREATED USER TO GET THE USER ID - url = server + "?command=listUsers&username=" + encodedUsername + "&account=" + encodedUsername + "&domainId=1"; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - long userId = -1; - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userIdValues = getSingleValueFromXML(is, new String[] {"id"}); - String userIdStr = userIdValues.get("id"); - logger.info("listed user " + username + " with id " + userIdStr); - if (userIdStr != null) { - userId = Long.parseLong(userIdStr); - s_userId.set(userId); - if (userId == -1) { - logger.error("list user by username " + username + ") failed to retrieve a valid user id, aborting depolyment test"); - return -1; - } - } - } else { - logger.error("list user test failed for account " + username + " with error code :" + responseCode + - ", aborting deployment test. 
The command was sent with url " + url); - return -1; - } - - s_secretKey.set(executeRegistration(server, username, username)); - - if (s_secretKey.get() == null) { - logger.error("FAILED to retrieve secret key during registration, skipping user: " + username); - return -1; - } else { - logger.info("got secret key: " + s_secretKey.get()); - logger.info("got api key: " + s_apiKey.get()); - } - - // --------------------------------- - // CREATE VIRTUAL NETWORK - // --------------------------------- - url = - server + "?command=createNetwork&networkofferingid=" + networkOfferingId + "&account=" + encodedUsername + "&domainId=1" + "&zoneId=" + zoneId + - "&name=virtualnetwork-" + encodedUsername + "&displaytext=virtualnetwork-" + encodedUsername; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map networkValues = getSingleValueFromXML(is, new String[] {"id"}); - String networkIdStr = networkValues.get("id"); - logger.info("Created virtual network with name virtualnetwork-" + encodedUsername + " and id " + networkIdStr); - if (networkIdStr != null) { - s_networkId.set(networkIdStr); - } - } else { - logger.error("Create virtual network failed for account " + username + " with error code :" + responseCode + - ", aborting deployment test. 
The command was sent with url " + url); - return -1; - } - /* - // --------------------------------- - // CREATE DIRECT NETWORK - // --------------------------------- - url = server + "?command=createNetwork&networkofferingid=" + networkOfferingId_dir + "&account=" + encodedUsername + "&domainId=1" + "&zoneId=" + zoneId + "&name=directnetwork-" + encodedUsername + "&displaytext=directnetwork-" + encodedUsername; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map networkValues = getSingleValueFromXML(is, new String[] { "id" }); - String networkIdStr = networkValues.get("id"); - logger.info("Created direct network with name directnetwork-" + encodedUsername + " and id " + networkIdStr); - if (networkIdStr != null) { - s_networkId_dir.set(networkIdStr); - } - } else { - logger.error("Create direct network failed for account " + username + " with error code :" + responseCode + ", aborting deployment test. 
The command was sent with url " + url); - return -1; - } - */ - - // --------------------------------- - // DEPLOY LINUX VM - // --------------------------------- - String linuxVMPrivateIP = null; - { - // long templateId = 3; - long templateId = 4; - String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); - String encodedServiceOfferingId = URLEncoder.encode("" + serviceOfferingId, "UTF-8"); - String encodedTemplateId = URLEncoder.encode("" + templateId, "UTF-8"); - String encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String encodedNetworkIds = URLEncoder.encode(s_networkId.get() + ",206", "UTF-8"); - String requestToSign = - "apikey=" + encodedApiKey + "&command=deployVirtualMachine&diskofferingid=" + diskOfferingId + "&networkids=" + encodedNetworkIds + - "&serviceofferingid=" + encodedServiceOfferingId + "&templateid=" + encodedTemplateId + "&zoneid=" + encodedZoneId; - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, s_secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - url = - developerServer + "?command=deployVirtualMachine" + "&zoneid=" + encodedZoneId + "&serviceofferingid=" + encodedServiceOfferingId + "&diskofferingid=" + - diskOfferingId + "&networkids=" + encodedNetworkIds + "&templateid=" + encodedTemplateId + "&apikey=" + encodedApiKey + "&signature=" + - encodedSignature; - - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id", "ipaddress"}); - - if ((values.get("ipaddress") == null) || (values.get("id") == null)) { - logger.info("deploy linux vm response code: 401, the command was sent with url " + url); - return 401; - } else { - logger.info("deploy linux vm response code: " + responseCode); - long linuxVMId = 
Long.parseLong(values.get("id")); - logger.info("got linux virtual machine id: " + linuxVMId); - s_linuxVmId.set(values.get("id")); - linuxVMPrivateIP = values.get("ipaddress"); - // s_linuxPassword.set(values.get("password")); - s_linuxPassword.set(vmPassword); - logger.info("got linux virtual machine password: " + s_linuxPassword.get()); - } - } else { - logger.error("deploy linux vm failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - { - // --------------------------------- - // ASSOCIATE IP for windows - // --------------------------------- - String ipAddr = null; - - String encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey + "&command=associateIpAddress" + "&zoneid=" + zoneId; - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, s_secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=associateIpAddress" + "&apikey=" + encodedApiKey + "&zoneid=" + zoneId + "&signature=" + encodedSignature; - - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - /*Asynchronous Job - Corresponding Changes Made*/ - Element associpel = queryAsyncJobResult(server, is); - Map values = getSingleValueFromXML(associpel, new String[] {"id", "ipaddress"}); - - if ((values.get("ipaddress") == null) || (values.get("id") == null)) { - logger.info("associate ip for Windows response code: 401, the command was sent with url " + url); - return 401; - } else { - logger.info("Associate IP Address response code: " + responseCode); - long publicIpId = Long.parseLong(values.get("id")); - logger.info("Associate IP's Id: " + publicIpId); - s_publicIpId.set(values.get("id")); - } - } else { - logger.error("associate ip address for windows vm failed with error code: " 
+ responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - String encodedPublicIpId = URLEncoder.encode(s_publicIpId.get(), "UTF-8"); - requestToSign = "apikey=" + encodedApiKey + "&command=listPublicIpAddresses" + "&id=" + encodedPublicIpId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listPublicIpAddresses&apikey=" + encodedApiKey + "&id=" + encodedPublicIpId + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("url is " + url); - logger.info("list ip addresses for user " + userId + " response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - // InputStream ips = method.getResponseBodyAsStream(); - List ipAddressValues = getIPs(is, false); - // List ipAddressVals = getIPs(is, false, true); - if ((ipAddressValues != null) && !ipAddressValues.isEmpty()) { - s_windowsIpId.set(ipAddressValues.get(0)); - s_windowsIP.set(ipAddressValues.get(1)); - logger.info("For Windows, using non-sourceNat IP address ID: " + ipAddressValues.get(0)); - logger.info("For Windows, using non-sourceNat IP address: " + ipAddressValues.get(1)); - } - } else { - logger.error("list ip addresses failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // --------------------------------- - // Use the SourceNat IP for linux - // --------------------------------- - { - requestToSign = "apikey=" + encodedApiKey + "&command=listPublicIpAddresses"; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listPublicIpAddresses&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("url is " + url); - logger.info("list ip addresses for user " + userId + " response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); -// InputStream ips = method.getResponseBodyAsStream(); - List ipAddressValues = getIPs(is, true); -// is = method.getResponseBodyAsStream(); -// List ipAddressVals = getIPs(is, true, true); - if ((ipAddressValues != null) && !ipAddressValues.isEmpty()) { - s_linuxIpId.set(ipAddressValues.get(0)); - s_linuxIP.set(ipAddressValues.get(1)); - logger.info("For linux, using sourceNat IP address ID: " + ipAddressValues.get(0)); - logger.info("For linux, using sourceNat IP address: " + ipAddressValues.get(1)); - } - } else { - logger.error("list ip addresses failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - //-------------------------------------------- - // Enable Static NAT for the Source NAT Ip - //-------------------------------------------- - String encodedSourceNatPublicIpId = URLEncoder.encode(s_linuxIpId.get(), "UTF-8"); - - /* requestToSign = "apikey=" + encodedApiKey + "&command=enableStaticNat"+"&id=" + encodedSourceNatPublicIpId + "&virtualMachineId=" + encodedVmId;; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=enableStaticNat&apikey=" + encodedApiKey + "&signature=" + encodedSignature + "&id=" + encodedSourceNatPublicIpId + "&virtualMachineId=" + encodedVmId; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("url is " + url); - logger.info("list ip addresses for user " + userId + " response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] { "success" }); - logger.info("Enable Static NAT..success? " + success.get("success")); - } else { - logger.error("Enable Static NAT failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - */ - // ------------------------------------------------------------- - // CREATE IP FORWARDING RULE -- Linux VM - // ------------------------------------------------------------- - String encodedVmId = URLEncoder.encode(s_linuxVmId.get(), "UTF-8"); - String encodedIpAddress = URLEncoder.encode(s_linuxIpId.get(), "UTF-8"); - requestToSign = - "apikey=" + encodedApiKey + "&command=createPortForwardingRule&ipaddressid=" + encodedIpAddress + "&privateport=22&protocol=TCP&publicport=22" + - "&virtualmachineid=" + encodedVmId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = - developerServer + "?command=createPortForwardingRule&apikey=" + encodedApiKey + "&ipaddressid=" + encodedIpAddress + - "&privateport=22&protocol=TCP&publicport=22&virtualmachineid=" + encodedVmId + "&signature=" + encodedSignature; - - logger.info("Created port forwarding rule with " + url); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - logger.info("Port forwarding rule was assigned successfully to Linux VM"); - long ipfwdid = Long.parseLong(values.get("id")); - logger.info("got Port Forwarding Rule's Id:" + ipfwdid); - s_linipfwdid.set(values.get("id")); - - } else { - logger.error("Port forwarding rule creation failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // Create snapshot recurring policy if needed; otherwise create windows vm - if (snapshotTest.equals("yes")) { - - // list volumes for linux vm - { - url = server + "?command=listVolumes&virtualMachineId=" + s_linuxVmId.get() + "&type=root"; - logger.info("Getting rootDisk id of Centos vm"); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("List volumes response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] {"id"}); - if (success.get("id") == null) { - logger.error("Unable to get root volume for linux vm. Followin url was sent: " + url); - } - logger.info("Got rootVolume for linux vm with id " + success.get("id")); - s_rootVolume.set(success.get("id")); - } else { - logger.error("List volumes for linux vm failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - // Create recurring snapshot policy for linux vm - { - String encodedTimeZone = URLEncoder.encode("America/Los Angeles", "UTF-8"); - url = - server + "?command=createSnapshotPolicy&intervaltype=hourly&schedule=10&maxsnaps=4&volumeid=" + s_rootVolume.get() + "&timezone=" + - encodedTimeZone; - logger.info("Creating recurring snapshot policy for linux vm ROOT disk"); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Create recurring snapshot policy for linux vm ROOT disk: " + responseCode); - if (responseCode != 200) { - logger.error("Create recurring snapshot policy for linux vm ROOT disk failed with error code: " + responseCode + ". 
Following URL was sent: " + - url); - return responseCode; - } - } - } else { - // --------------------------------- - // DEPLOY WINDOWS VM - // --------------------------------- - String windowsVMPrivateIP = null; - { - // long templateId = 6; - long templateId = 4; - String encodedZoneId = URLEncoder.encode("" + zoneId, "UTF-8"); - String encodedServiceOfferingId = URLEncoder.encode("" + serviceOfferingId, "UTF-8"); - String encodedTemplateId = URLEncoder.encode("" + templateId, "UTF-8"); - encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String encodedNetworkIds = URLEncoder.encode(s_networkId.get() + ",206", "UTF-8"); - - requestToSign = - "apikey=" + encodedApiKey + "&command=deployVirtualMachine&diskofferingid=" + diskOfferingId + "&networkids=" + encodedNetworkIds + - "&serviceofferingid=" + encodedServiceOfferingId + "&templateid=" + encodedTemplateId + "&zoneid=" + encodedZoneId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = - developerServer + "?command=deployVirtualMachine" + "&zoneid=" + encodedZoneId + "&serviceofferingid=" + encodedServiceOfferingId + - "&diskofferingid=" + diskOfferingId + "&networkids=" + encodedNetworkIds + "&templateid=" + encodedTemplateId + "&apikey=" + encodedApiKey + - "&signature=" + encodedSignature; - - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id", "ipaddress"}); - - if ((values.get("ipaddress") == null) || (values.get("id") == null)) { - logger.info("deploy windows vm response code: 401, the command was sent with url " + url); - return 401; - } else { - logger.info("deploy windows vm response code: " + responseCode); - windowsVMPrivateIP = 
values.get("ipaddress"); - long windowsVMId = Long.parseLong(values.get("id")); - logger.info("got windows virtual machine id: " + windowsVMId); - s_windowsVmId.set(values.get("id")); - } - } else { - logger.error("deploy windows vm failes with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - //-------------------------------------------- - // Enable Static NAT for the Non Source NAT Ip - //-------------------------------------------- - - encodedVmId = URLEncoder.encode(s_windowsVmId.get(), "UTF-8"); - encodedPublicIpId = URLEncoder.encode(s_publicIpId.get(), "UTF-8"); - requestToSign = "apikey=" + encodedApiKey + "&command=enableStaticNat" + "&ipaddressid=" + encodedPublicIpId + "&virtualMachineId=" + encodedVmId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = - developerServer + "?command=enableStaticNat&apikey=" + encodedApiKey + "&ipaddressid=" + encodedPublicIpId + "&signature=" + encodedSignature + - "&virtualMachineId=" + encodedVmId; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("url is " + url); - logger.info("list ip addresses for user " + userId + " response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] {"success"}); - logger.info("Enable Static NAT..success? " + success.get("success")); - } else { - logger.error("Enable Static NAT failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ------------------------------------------------------------- - // CREATE IP FORWARDING RULE -- Windows VM - // ------------------------------------------------------------- - - // create port forwarding rule for window vm - encodedIpAddress = URLEncoder.encode(s_windowsIpId.get(), "UTF-8"); - //encodedVmId = URLEncoder.encode(s_windowsVmId.get(), "UTF-8"); - - requestToSign = "apikey=" + encodedApiKey + "&command=createIpForwardingRule&endPort=22&ipaddressid=" + encodedIpAddress + "&protocol=TCP&startPort=22"; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = - developerServer + "?command=createIpForwardingRule&apikey=" + encodedApiKey + "&endPort=22&ipaddressid=" + encodedIpAddress + - "&protocol=TCP&signature=" + encodedSignature + "&startPort=22"; - - logger.info("Created Ip forwarding rule with " + url); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - logger.info("Port forwarding rule was assigned successfully to Windows VM"); - long ipfwdid = Long.parseLong(values.get("id")); - logger.info("got Ip Forwarding Rule's Id:" + ipfwdid); - s_winipfwdid.set(values.get("id")); - } else { - logger.error("Port forwarding rule creation failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - } - return responseCode; - } - - private static int executeCleanup(String server, String developerServer, String username) throws HttpException, IOException { - // test steps: - // - get user - // - delete user - - // ----------------------------- - // GET USER - // ----------------------------- - String userId = s_userId.get().toString(); - String encodedUserId = URLEncoder.encode(userId, "UTF-8"); - String url = server + "?command=listUsers&id=" + encodedUserId; - logger.info("Cleaning up resources for user: " + userId + " with url " + url); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - logger.info("get user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userInfo = getSingleValueFromXML(is, new String[] {"username", "id", "account"}); - if (!username.equals(userInfo.get("username"))) { - logger.error("get user failed to retrieve requested user, aborting cleanup test" + ". Following URL was sent: " + url); - return -1; - } - - } else { - logger.error("get user failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - // ----------------------------- - // UPDATE USER - // ----------------------------- - { - url = server + "?command=updateUser&id=" + userId + "&firstname=delete&lastname=me"; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("update user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] {"success"}); - logger.info("update user..success? " + success.get("success")); - } else { - logger.error("update user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - // ----------------------------- - // Detach existin dataVolume, create a new volume, attach it to the vm - // ----------------------------- - { - url = server + "?command=listVolumes&virtualMachineId=" + s_linuxVmId.get() + "&type=dataDisk"; - logger.info("Getting dataDisk id of Centos vm"); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("List volumes response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] {"id"}); - logger.info("Got dataDiskVolume with id " + success.get("id")); - s_dataVolume.set(success.get("id")); - } else { - logger.error("List volumes failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - // Detach volume - { - url = server + "?command=detachVolume&id=" + s_dataVolume.get(); - logger.info("Detaching volume with id " + s_dataVolume.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Detach data volume response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - logger.info("The volume was detached successfully"); - } else { - logger.error("Detach data disk failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - // Delete a volume - { - url = server + "?command=deleteVolume&id=" + s_dataVolume.get(); - logger.info("Deleting volume with id " + s_dataVolume.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Delete data volume response code: " + responseCode); - if (responseCode == 200) { - logger.info("The volume was deleted successfully"); - } else { - logger.error("Delete volume failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - // Create a new volume - { - url = server + "?command=createVolume&diskofferingid=" + diskOfferingId + "&zoneid=" + zoneId + "&name=newvolume&account=" + s_account.get() + "&domainid=1"; - logger.info("Creating volume...."); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - - if (values.get("id") == null) { - logger.info("create volume response code: 401"); - return 401; - } else { - logger.info("create volume response code: " + responseCode); - long volumeId = Long.parseLong(values.get("id")); - logger.info("got volume id: " + volumeId); - s_newVolume.set(values.get("id")); - } - } else { - logger.error("create volume failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - // attach a new volume to the vm - { - url = server + "?command=attachVolume&id=" + s_newVolume.get() + "&virtualmachineid=" + s_linuxVmId.get(); - logger.info("Attaching volume with id " + s_newVolume.get() + " to the vm " + s_linuxVmId.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Attach data volume response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - logger.info("The volume was attached successfully"); - } else { - logger.error("Attach volume failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - // Create a snapshot - // list volumes - { - url = server + "?command=listVolumes&virtualMachineId=" + s_linuxVmId.get() + "&type=root"; - logger.info("Getting rootDisk id of Centos vm"); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("List volumes response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] {"id"}); - if (success.get("id") == null) { - logger.error("Unable to get root volume. Followin url was sent: " + url); - } - logger.info("Got rootVolume with id " + success.get("id")); - s_rootVolume.set(success.get("id")); - } else { - logger.error("List volumes failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - // //Create snapshot from root disk volume - String encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey + "&command=createSnapshot&volumeid=" + s_rootVolume.get(); - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, s_secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=createSnapshot&volumeid=" + s_rootVolume.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Create snapshot response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - - if (values.get("id") == null) { - logger.info("create snapshot response code: 401"); - return 401; - } else { - logger.info("create snapshot response code: " + responseCode + ". Got snapshot with id " + values.get("id")); - s_snapshot.set(values.get("id")); - } - } else { - logger.error("create snapshot failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // Create volume from the snapshot created on the previous step and attach it to the running vm - /* encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - requestToSign = "apikey=" + encodedApiKey + "&command=createVolume&name=" + s_account.get() + "&snapshotid=" + s_snapshot.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=createVolume&name=" + s_account.get() + "&snapshotid=" + s_snapshot.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Create volume from snapshot response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] { "id" }); - - if (values.get("id") == null) { - logger.info("create volume from snapshot response code: 401"); - return 401; - } else { - logger.info("create volume from snapshot response code: " + responseCode + ". Got volume with id " + values.get("id") + ". The command was sent with url " + url); - s_volumeFromSnapshot.set(values.get("id")); - } - } else { - logger.error("create volume from snapshot failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - { - url = server + "?command=attachVolume&id=" + s_volumeFromSnapshot.get() + "&virtualmachineid=" + s_linuxVmId.get(); - logger.info("Attaching volume with id " + s_volumeFromSnapshot.get() + " to the vm " + s_linuxVmId.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Attach volume from snapshot to linux vm response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - logger.info("The volume created from snapshot was attached successfully to linux vm"); - } else { - logger.error("Attach volume created from snapshot failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - */ - // ----------------------------- - // Execute reboot/stop/start commands for the VMs before deleting the account - made to exercise xen - // ----------------------------- - - // Reboot windows VM - requestToSign = "apikey=" + encodedApiKey + "&command=rebootVirtualMachine&id=" + s_windowsVmId.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=rebootVirtualMachine&id=" + s_windowsVmId.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Reboot windows Vm response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, new String[] {"success"}); - logger.info("Windows VM was rebooted with the status: " + success.get("success")); - } else { - 
logger.error("Reboot windows VM test failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - // Stop centos VM - requestToSign = "apikey=" + encodedApiKey + "&command=stopVirtualMachine&id=" + s_linuxVmId.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=stopVirtualMachine&id=" + s_linuxVmId.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Stop linux Vm response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, new String[] {"success"}); - logger.info("Linux VM was stopped with the status: " + success.get("success")); - } else { - logger.error("Stop linux VM test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // Create private template from root disk volume - requestToSign = - "apikey=" + encodedApiKey + "&command=createTemplate" + "&displaytext=" + s_account.get() + "&name=" + s_account.get() + "&ostypeid=11" + "&snapshotid=" + - s_snapshot.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = - developerServer + "?command=createTemplate" + "&displaytext=" + s_account.get() + "&name=" + s_account.get() + "&ostypeid=11" + "&snapshotid=" + - s_snapshot.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Create private template response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map values = getSingleValueFromXML(el, new String[] {"id"}); - - if (values.get("id") == null) { - logger.info("create private template response code: 401"); - return 401; - } else { - logger.info("create private template response code: " + responseCode); - } - } else { - logger.error("create private template failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // Start centos VM - requestToSign = "apikey=" + encodedApiKey + "&command=startVirtualMachine&id=" + s_windowsVmId.get(); - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=startVirtualMachine&id=" + s_windowsVmId.get() + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Start linux Vm response code: " + responseCode); - if (responseCode != 200) { - logger.error("Start linux VM test failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - // get domainRouter id - { - url = server + "?command=listRouters&zoneid=" + zoneId + "&account=" + s_account.get() + "&domainid=1"; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("List domain routers response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] {"id"}); - logger.info("Got the domR with id " + success.get("id")); - s_domainRouterId.set(success.get("id")); - } else { - logger.error("List domain routers failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - - // reboot the domain router - { - url = server + "?command=rebootRouter&id=" + s_domainRouterId.get(); - logger.info("Rebooting domR with id " + s_domainRouterId.get()); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("Reboot domain router response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - logger.info("Domain router was rebooted successfully"); - } else { - logger.error("Reboot domain routers failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - } - - // ----------------------------- - // DELETE ACCOUNT - // ----------------------------- - { - url = server + "?command=deleteAccount&id=" + s_accountId.get(); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("delete account response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - logger.info("Deleted account successfully"); - } else { - logger.error("delete account failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - return responseCode; - } - - private static int executeEventsAndBilling(String server, String developerServer) throws HttpException, IOException { - // test steps: - // - get all the events in the system for all users in the system - // - generate all the usage records in the system - // - get all the usage records in the system - - // ----------------------------- - // GET EVENTS - // ----------------------------- - String url = server + "?command=listEvents&page=1&pagesize=100&&account=" + s_account.get(); - - logger.info("Getting events for the account " + s_account.get()); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - logger.info("get events response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> eventDescriptions = getMultipleValuesFromXML(is, new String[] {"description"}); - List descriptionText = eventDescriptions.get("description"); - if (descriptionText == null) { - logger.info("no events retrieved..."); - } else { - for (String text : descriptionText) { - logger.info("event: " + text); - } - } - } else { - logger.error("list events failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - - return responseCode; - } - - // ------------------------------------------------------------------------------------- - // GENERATE USAGE RECORDS (note: typically this is done infrequently) - // ------------------------------------------------------------------------------------- - DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); - Date currentDate = new Date(); - String endDate = dateFormat.format(currentDate); - logger.info("Generating usage records from September 1st till " + endDate); - url = server + "?command=generateUsageRecords&startdate=2009-09-01&enddate=" + endDate; // generate - // all usage record till today - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("generate usage records response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map successStr = getSingleValueFromXML(is, new String[] {"success"}); - logger.info("successfully generated usage records? " + successStr.get("success")); - } else { - logger.error("generate usage records failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // Sleeping for a 2 minutes before getting a usage records from the database - try { - Thread.sleep(120000); - } catch (Exception ex) { - logger.error(ex); - } - - // -------------------------------- - // GET USAGE RECORDS - // -------------------------------- - url = server + "?command=listUsageRecords&startdate=2009-09-01&enddate=" + endDate + "&account=" + s_account.get() + "&domaindid=1"; - logger.info("Getting all usage records with request: " + url); - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("get usage records response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> usageRecValues = getMultipleValuesFromXML(is, new String[] {"description", "usage"}); - if ((usageRecValues.containsKey("description") == true) && (usageRecValues.containsKey("usage") == true)) { - List descriptions = usageRecValues.get("description"); - List usages = usageRecValues.get("usage"); - for (int i = 0; i < descriptions.size(); i++) { - String desc = descriptions.get(i); - String usage = ""; - if (usages != null) { - if (i < usages.size()) { - usage = ", usage: " + usages.get(i); - } - } - logger.info("desc: " + desc + usage); - } - } - - } else { - logger.error("list usage records failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - return responseCode; - } - - private static boolean getNetworkStat(String server) { - try { - String url = server + "?command=listAccountStatistics&account=" + s_account.get(); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - logger.info("listAccountStatistics response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map requestKeyValues = getSingleValueFromXML(is, new String[] {"receivedbytes", "sentbytes"}); - int bytesReceived = Integer.parseInt(requestKeyValues.get("receivedbytes")); - int bytesSent = Integer.parseInt(requestKeyValues.get("sentbytes")); - if ((bytesReceived > 100000000) && (bytesSent > 0)) { - logger.info("Network stat is correct for account" + s_account.get() + "; bytest received is " + toHumanReadableSize(bytesReceived) + " and bytes sent is " + toHumanReadableSize(bytesSent)); - return true; - } else { - logger.error("Incorrect value for bytes received/sent for the account " + s_account.get() + ". We got " + toHumanReadableSize(bytesReceived) + " bytes received; " + - " and " + toHumanReadableSize(bytesSent) + " bytes sent"); - return false; - } - - } else { - logger.error("listAccountStatistics failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return false; - } - } catch (Exception ex) { - logger.error("Exception while sending command listAccountStatistics"); - return false; - } - } - - private static int executeStop(String server, String developerServer, String username, boolean destroy) throws HttpException, IOException { - // test steps: - // - get userId for the given username - // - list virtual machines for the user - // - stop all virtual machines - // - get ip addresses for the user - // - release ip addresses - - // ----------------------------- - // GET USER - // ----------------------------- - String userId = s_userId.get().toString(); - String encodedUserId = URLEncoder.encode(userId, "UTF-8"); - - String url = server + "?command=listUsers&id=" + encodedUserId; - logger.info("Stopping resources for user: " + username); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - logger.info("get user response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map userIdValues = getSingleValueFromXML(is, new String[] {"id"}); - String userIdStr = userIdValues.get("id"); - if (userIdStr != null) { - userId = userIdStr; - - } else { - logger.error("get user failed to retrieve a valid user id, aborting depolyment test" + ". Following URL was sent: " + url); - return -1; - } - } else { - logger.error("get user failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - { - // ---------------------------------- - // LIST VIRTUAL MACHINES - // ---------------------------------- - String encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey + "&command=listVirtualMachines"; - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, s_secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listVirtualMachines&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - - logger.info("Listing all virtual machines for the user with url " + url); - String[] vmIds = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("list virtual machines response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> vmIdValues = getMultipleValuesFromXML(is, new String[] {"id"}); - if (vmIdValues.containsKey("id")) { - List vmIdList = vmIdValues.get("id"); - if (vmIdList != null) { - vmIds = new String[vmIdList.size()]; - vmIdList.toArray(vmIds); - String vmIdLogStr = ""; - if ((vmIds != null) && (vmIds.length > 0)) { - vmIdLogStr = vmIds[0]; - for (int i = 1; i < vmIds.length; i++) { - vmIdLogStr = vmIdLogStr + "," + vmIds[i]; - } - } - logger.info("got virtual machine ids: " + vmIdLogStr); - } - } - - } else { - logger.error("list virtual machines test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ---------------------------------- - // LIST USER IP ADDRESSES - // ---------------------------------- - - requestToSign = "apikey=" + encodedApiKey + "&command=listPublicIpAddresses"; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listPublicIpAddresses&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - String[] ipAddresses = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("list ip addresses for user " + userId + " response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> ipAddressValues = getMultipleValuesFromXML(is, new String[] {"ipaddress"}); - if (ipAddressValues.containsKey("ipaddress")) { - List ipAddressList = ipAddressValues.get("ipaddress"); - if (ipAddressList != null) { - ipAddresses = new String[ipAddressList.size()]; - ipAddressList.toArray(ipAddresses); - String ipAddressLogStr = ""; - if ((ipAddresses != null) && (ipAddresses.length > 0)) { - ipAddressLogStr = ipAddresses[0]; - for (int i = 1; i < ipAddresses.length; i++) { - ipAddressLogStr = ipAddressLogStr + "," + ipAddresses[i]; - } - } - logger.info("got IP addresses: " + ipAddressLogStr); - } - } - - } else { - logger.error("list user ip addresses failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ---------------------------------- - // LIST ZONES - // ---------------------------------- - - requestToSign = "apikey=" + encodedApiKey + "&command=listZones"; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listZones&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - String[] zoneNames = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("list zones response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> zoneNameValues = getMultipleValuesFromXML(is, new String[] {"name"}); - if (zoneNameValues.containsKey("name")) { - List zoneNameList = zoneNameValues.get("name"); - if (zoneNameList != null) { - zoneNames = new String[zoneNameList.size()]; - zoneNameList.toArray(zoneNames); - String zoneNameLogStr = "\n\n"; - if ((zoneNames != null) && (zoneNames.length > 0)) { - zoneNameLogStr += zoneNames[0]; - for (int i = 1; i < zoneNames.length; i++) { - zoneNameLogStr = zoneNameLogStr + "\n" + zoneNames[i]; - } - - } - zoneNameLogStr += "\n\n"; - logger.info("got zones names: " + zoneNameLogStr); - } - } - - } else { - logger.error("list zones failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ---------------------------------- - // LIST ACCOUNT STATISTICS - // ---------------------------------- - - requestToSign = "apikey=" + encodedApiKey + "&command=listAccounts"; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listAccounts&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - String[] statNames = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("listAccountStatistics response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> statValues = getMultipleValuesFromXML(is, new String[] {"receivedbytes"}); - if (statValues.containsKey("receivedbytes")) { - List statList = statValues.get("receivedbytes"); - if (statList != null) { - statNames = new String[statList.size()]; - statList.toArray(statNames); - String statLogStr = "\n\n"; - if ((statNames != null) && (zoneNames.length > 0)) { - statLogStr += statNames[0]; - for (int i = 1; i < statNames.length; i++) { - statLogStr = statLogStr + "\n" + zoneNames[i]; - } - - } - statLogStr += "\n\n"; - logger.info("got accountstatistics: " + statLogStr); - } - } - - } else { - logger.error("listAccountStatistics failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ---------------------------------- - // LIST TEMPLATES - // ---------------------------------- - - requestToSign = "apikey=" + encodedApiKey + "&command=listTemplates@templatefilter=self"; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listTemplates&apikey=" + encodedApiKey + "&templatefilter=self&signature=" + encodedSignature; - String[] templateNames = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("list templates response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> templateNameValues = getMultipleValuesFromXML(is, new String[] {"name"}); - - if (templateNameValues.containsKey("name")) { - List templateNameList = templateNameValues.get("name"); - if (templateNameList != null) { - templateNames = new String[templateNameList.size()]; - templateNameList.toArray(templateNames); - String templateNameLogStr = "\n\n"; - if ((templateNames != null) && (templateNames.length > 0)) { - templateNameLogStr += templateNames[0]; - for (int i = 1; i < templateNames.length; i++) { - templateNameLogStr = templateNameLogStr + "\n" + templateNames[i]; - } - - } - templateNameLogStr += "\n\n"; - logger.info("got template names: " + templateNameLogStr); - } - } - - } else { - logger.error("list templates failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ---------------------------------- - // LIST SERVICE OFFERINGS - // ---------------------------------- - - requestToSign = "apikey=" + encodedApiKey + "&command=listServiceOfferings"; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listServiceOfferings&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - String[] serviceOfferingNames = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("list service offerings response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> serviceOfferingNameValues = getMultipleValuesFromXML(is, new String[] {"name"}); - - if (serviceOfferingNameValues.containsKey("name")) { - List serviceOfferingNameList = serviceOfferingNameValues.get("name"); - if (serviceOfferingNameList != null) { - serviceOfferingNames = new String[serviceOfferingNameList.size()]; - serviceOfferingNameList.toArray(serviceOfferingNames); - String serviceOfferingNameLogStr = ""; - if ((serviceOfferingNames != null) && (serviceOfferingNames.length > 0)) { - serviceOfferingNameLogStr = serviceOfferingNames[0]; - for (int i = 1; i < serviceOfferingNames.length; i++) { - serviceOfferingNameLogStr = serviceOfferingNameLogStr + ", " + serviceOfferingNames[i]; - } - } - logger.info("got service offering names: " + serviceOfferingNameLogStr); - } - } - - } else { - logger.error("list service offerings failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ---------------------------------- - // LIST EVENTS - // --------------------------------- - - url = server + "?command=listEvents&page=1&pagesize=100&&account=" + s_account.get(); - String[] eventDescriptions = null; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("list events response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map> eventNameValues = getMultipleValuesFromXML(is, new String[] {"description"}); - - if (eventNameValues.containsKey("description")) { - List eventNameList = eventNameValues.get("description"); - if (eventNameList != null) { - eventDescriptions = new String[eventNameList.size()]; - eventNameList.toArray(eventDescriptions); - String eventNameLogStr = "\n\n"; - if ((eventDescriptions != null) && (eventDescriptions.length > 0)) { - eventNameLogStr += eventDescriptions[0]; - for (int i = 1; i < eventDescriptions.length; i++) { - eventNameLogStr = eventNameLogStr + "\n" + eventDescriptions[i]; - } - } - eventNameLogStr += "\n\n"; - logger.info("got event descriptions: " + eventNameLogStr); - } - } - } else { - logger.error("list events failed with error code: " + responseCode + ". Following URL was sent: " + url); - return responseCode; - } - - // ---------------------------------- - // STOP/DESTROY VIRTUAL MACHINES - // ---------------------------------- - if (vmIds != null) { - String cmdName = (destroy ? 
"destroyVirtualMachine" : "stopVirtualMachine"); - for (String vmId : vmIds) { - requestToSign = "apikey=" + encodedApiKey + "&command=" + cmdName + "&id=" + vmId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=" + cmdName + "&id=" + vmId + "&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info(cmdName + " [" + vmId + "] response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(el, new String[] {"success"}); - logger.info(cmdName + "..success? " + success.get("success")); - } else { - logger.error(cmdName + "test failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - } - } - - { - String[] ipAddresses = null; - // ----------------------------------------- - // LIST NAT IP ADDRESSES - // ----------------------------------------- - String encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String requestToSign = "apikey=" + encodedApiKey + "&command=listPublicIpAddresses"; - requestToSign = requestToSign.toLowerCase(); - String signature = signRequest(requestToSign, s_secretKey.get()); - String encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=listPublicIpAddresses&apikey=" + encodedApiKey + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("list ip addresses for user " + userId + " response code: " + responseCode); - if (responseCode == 200) { - - InputStream is = method.getResponseBodyAsStream(); - List ipAddressList = getNonSourceNatIPs(is); - ipAddresses = new String[ipAddressList.size()]; - ipAddressList.toArray(ipAddresses); - String ipAddrLogStr = ""; - if ((ipAddresses != null) && (ipAddresses.length > 0)) { - ipAddrLogStr = ipAddresses[0]; - for (int i = 1; i < ipAddresses.length; i++) { - ipAddrLogStr = ipAddrLogStr + "," + ipAddresses[i]; - } - } - logger.info("got ip addresses: " + ipAddrLogStr); - - } else { - logger.error("list nat ip addresses failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ------------------------------------------------------------- - // Delete IP FORWARDING RULE -- Windows VM - // ------------------------------------------------------------- - String encodedIpFwdId = URLEncoder.encode(s_winipfwdid.get(), "UTF-8"); - - requestToSign = "apikey=" + encodedApiKey + "&command=deleteIpForwardingRule&id=" + encodedIpFwdId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=deleteIpForwardingRule&apikey=" + encodedApiKey + "&id=" + encodedIpFwdId + "&signature=" + encodedSignature; - - logger.info("Delete Ip forwarding rule with " + url); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element el = queryAsyncJobResult(server, input); - logger.info("IP forwarding rule was successfully deleted"); - - } else { - logger.error("IP forwarding rule creation failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - //-------------------------------------------- - // Disable Static NAT for the Source NAT Ip - //-------------------------------------------- - encodedApiKey = URLEncoder.encode(s_apiKey.get(), "UTF-8"); - String encodedPublicIpId = URLEncoder.encode(s_publicIpId.get(), "UTF-8"); - requestToSign = "apikey=" + encodedApiKey + "&command=disableStaticNat" + "&id=" + encodedPublicIpId; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=disableStaticNat&apikey=" + encodedApiKey + "&id=" + encodedPublicIpId + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("url is " + url); - logger.info("list ip addresses for user " + userId + " response code: " + responseCode); - if (responseCode == 200) { - InputStream is = method.getResponseBodyAsStream(); - Map success = getSingleValueFromXML(is, new String[] {"success"}); - logger.info("Disable Static NAT..success? " + success.get("success")); - } else { - logger.error("Disable Static NAT failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - - // ----------------------------------------- - // DISASSOCIATE IP ADDRESSES - // ----------------------------------------- - if (ipAddresses != null) { - for (String ipAddress : ipAddresses) { - requestToSign = "apikey=" + encodedApiKey + "&command=disassociateIpAddress&id=" + ipAddress; - requestToSign = requestToSign.toLowerCase(); - signature = signRequest(requestToSign, s_secretKey.get()); - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - - url = developerServer + "?command=disassociateIpAddress&apikey=" + encodedApiKey + "&id=" + ipAddress + "&signature=" + encodedSignature; - client = new HttpClient(); - method = new GetMethod(url); - responseCode = client.executeMethod(method); - logger.info("disassociate ip address [" + userId + "/" + ipAddress + "] response code: " + responseCode); - if (responseCode == 200) { - InputStream input = method.getResponseBodyAsStream(); - Element disassocipel = queryAsyncJobResult(server, input); - Map success = getSingleValueFromXML(disassocipel, new String[] {"success"}); - // Map success = getSingleValueFromXML(input, new String[] { "success" }); - logger.info("disassociate ip address..success? " + success.get("success")); - } else { - logger.error("disassociate ip address failed with error code: " + responseCode + ". 
Following URL was sent: " + url); - return responseCode; - } - } - } - } - s_linuxIP.set(""); - s_linuxIpId.set(""); - s_linuxVmId.set(""); - s_linuxPassword.set(""); - s_windowsIP.set(""); - s_windowsIpId.set(""); - s_windowsVmId.set(""); - s_secretKey.set(""); - s_apiKey.set(""); - s_userId.set(Long.parseLong("0")); - s_account.set(""); - s_domainRouterId.set(""); - return responseCode; - } - - public static String signRequest(String request, String key) { - try { - Mac mac = Mac.getInstance("HmacSHA1"); - SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), "HmacSHA1"); - mac.init(keySpec); - mac.update(request.getBytes()); - byte[] encryptedBytes = mac.doFinal(); - return org.apache.commons.codec.binary.Base64.encodeBase64String(encryptedBytes); - } catch (Exception ex) { - logger.error("unable to sign request", ex); - } - return null; - } - - private static String sshWinTest(String host) { - if (host == null) { - logger.info("Did not receive a host back from test, ignoring win ssh test"); - return null; - } - - // We will retry 5 times before quitting - int retry = 1; - - while (true) { - try { - if (retry > 0) { - logger.info("Retry attempt : " + retry + " ...sleeping 300 seconds before next attempt. 
Account is " + s_account.get()); - Thread.sleep(300000); - } - - logger.info("Attempting to SSH into windows host " + host + " with retry attempt: " + retry + " for account " + s_account.get()); - - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); - - logger.info("User " + s_account.get() + " ssHed successfully into windows host " + host); - boolean success = false; - boolean isAuthenticated = conn.authenticateWithPassword("Administrator", "password"); - if (isAuthenticated == false) { - return "Authentication failed"; - } else { - logger.info("Authentication is successful"); - } - - try { - SCPClient scp = new SCPClient(conn); - scp.put("wget.exe", "wget.exe", "C:\\Users\\Administrator", "0777"); - logger.info("Successfully put wget.exe file"); - } catch (Exception ex) { - logger.error("Unable to put wget.exe " + ex); - } - - if (conn == null) { - logger.error("Connection is null"); - } - Session sess = conn.openSession(); - - logger.info("User + " + s_account.get() + " executing : wget http://" + downloadUrl); - String downloadCommand = "wget http://" + downloadUrl + " && dir dump.bin"; - sess.execCommand(downloadCommand); - - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); - - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() == 0)) { - int conditions = sess.waitForCondition(ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA | ChannelCondition.EOF, 120000); - - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - logger.info("Timeout while waiting for data from peer."); - return null; - } - - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } - - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - logger.info(new String(buffer, 0, len)); - } - - while 
(stderr.available() > 0) { - /* int len = */stderr.read(buffer); - } - } - sess.close(); - conn.close(); - - if (success) { - return null; - } else { - retry++; - if (retry == MAX_RETRY_WIN) { - return "SSH Windows Network test fail for account " + s_account.get(); - } - } - } catch (Exception e) { - logger.error(e); - retry++; - if (retry == MAX_RETRY_WIN) { - return "SSH Windows Network test fail with error " + e.getMessage(); - } - } - } - } - - private static String sshTest(String host, String password, String snapshotTest) { - int i = 0; - if (host == null) { - logger.info("Did not receive a host back from test, ignoring ssh test"); - return null; - } - - if (password == null) { - logger.info("Did not receive a password back from test, ignoring ssh test"); - return null; - } - - // We will retry 5 times before quitting - String result = null; - int retry = 0; - - while (true) { - try { - if (retry > 0) { - logger.info("Retry attempt : " + retry + " ...sleeping 120 seconds before next attempt. Account is " + s_account.get()); - Thread.sleep(120000); - } - - logger.info("Attempting to SSH into linux host " + host + " with retry attempt: " + retry + ". 
Account is " + s_account.get()); - - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); - - logger.info("User + " + s_account.get() + " ssHed successfully into linux host " + host); - - boolean isAuthenticated = conn.authenticateWithPassword("root", password); - - if (isAuthenticated == false) { - logger.info("Authentication failed for root with password" + password); - return "Authentication failed"; - - } - - boolean success = false; - String linuxCommand = null; - - if (i % 10 == 0) - linuxCommand = "rm -rf *; wget http://" + downloadUrl + " && ls -al dump.bin"; - else - linuxCommand = "wget http://" + downloadUrl + " && ls -al dump.bin"; - - Session sess = conn.openSession(); - logger.info("User " + s_account.get() + " executing : " + linuxCommand); - sess.execCommand(linuxCommand); - - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); - - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() == 0)) { - int conditions = sess.waitForCondition(ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA | ChannelCondition.EOF, 120000); - - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - logger.info("Timeout while waiting for data from peer."); - return null; - } - - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } - - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - logger.info(new String(buffer, 0, len)); - } - - while (stderr.available() > 0) { - /* int len = */stderr.read(buffer); - } - } - - sess.close(); - conn.close(); - - if (!success) { - retry++; - if (retry == MAX_RETRY_LINUX) { - result = "SSH Linux Network test fail"; - } - } - - if (snapshotTest.equals("no")) - return result; - else { - Long sleep = 300000L; - logger.info("Sleeping for " + sleep / 
1000 / 60 + "minutes before executing next ssh test"); - Thread.sleep(sleep); - } - } catch (Exception e) { - retry++; - logger.error("SSH Linux Network test fail with error"); - if ((retry == MAX_RETRY_LINUX) && (snapshotTest.equals("no"))) { - return "SSH Linux Network test fail with error " + e.getMessage(); - } - } - i++; - } - } - - public static String createMD5Password(String password) { - MessageDigest md5; - - try { - md5 = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new CloudRuntimeException("Error", e); - } - - md5.reset(); - BigInteger pwInt = new BigInteger(1, md5.digest(password.getBytes())); - - // make sure our MD5 hash value is 32 digits long... - StringBuffer sb = new StringBuffer(); - String pwStr = pwInt.toString(16); - int padding = 32 - pwStr.length(); - for (int i = 0; i < padding; i++) { - sb.append('0'); - } - sb.append(pwStr); - return sb.toString(); - } - - public static Element queryAsyncJobResult(String host, InputStream inputStream) { - Element returnBody = null; - - Map values = getSingleValueFromXML(inputStream, new String[] {"jobid"}); - String jobId = values.get("jobid"); - - if (jobId == null) { - logger.error("Unable to get a jobId"); - return null; - } - - // logger.info("Job id is " + jobId); - String resultUrl = host + "?command=queryAsyncJobResult&jobid=" + jobId; - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(resultUrl); - while (true) { - try { - client.executeMethod(method); - // logger.info("Method is executed successfully. 
Following url was sent " + resultUrl); - InputStream is = method.getResponseBodyAsStream(); - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - DocumentBuilder builder = factory.newDocumentBuilder(); - Document doc = builder.parse(is); - returnBody = doc.getDocumentElement(); - doc.getDocumentElement().normalize(); - Element jobStatusTag = (Element)returnBody.getElementsByTagName("jobstatus").item(0); - String jobStatus = jobStatusTag.getTextContent(); - if (jobStatus.equals("0")) { - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - logger.debug("[ignored] interrupted while during async job result query."); - } - } else { - break; - } - - } catch (Exception ex) { - logger.error(ex); - } - } - return returnBody; - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/stress/WgetTest.java b/test/src-not-used/main/java/com/cloud/test/stress/WgetTest.java deleted file mode 100644 index f62c41cd457..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/stress/WgetTest.java +++ /dev/null @@ -1,151 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.stress; - -import java.io.InputStream; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import com.trilead.ssh2.ChannelCondition; -import com.trilead.ssh2.Connection; -import com.trilead.ssh2.Session; - -public class WgetTest { - - public static final int MAX_RETRY_LINUX = 1; - protected Logger logger = LogManager.getLogger(getClass()); - public static String host = ""; - public static String password = "rs-ccb35ea5"; - - public static void main(String[] args) { - - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - // host - if (arg.equals("-h")) { - host = iter.next(); - } - //password - - if (arg.equals("-p")) { - password = iter.next(); - } - - } - - int i = 0; - if (host == null || host.equals("")) { - logger.info("Did not receive a host back from test, ignoring ssh test"); - System.exit(2); - } - - if (password == null) { - logger.info("Did not receive a password back from test, ignoring ssh test"); - System.exit(2); - } - int retry = 0; - - try { - if (retry > 0) { - logger.info("Retry attempt : " + retry + " ...sleeping 120 seconds before next attempt"); - Thread.sleep(120000); - } - - logger.info("Attempting to SSH into linux host " + host + " with retry attempt: " + retry); - - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); - - logger.info("User + ssHed successfully into linux host " + host); - - boolean isAuthenticated = conn.authenticateWithPassword("root", password); - - if (isAuthenticated == false) { - logger.info("Authentication failed for root with password" + password); - System.exit(2); - } - - boolean success = false; - String linuxCommand = null; - - if (i % 10 == 0) - linuxCommand = "rm -rf *; wget http://192.168.1.250/dump.bin && ls -al dump.bin"; - else - linuxCommand = "wget 
http://192.168.1.250/dump.bin && ls -al dump.bin"; - - Session sess = conn.openSession(); - sess.execCommand(linuxCommand); - - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); - - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() == 0)) { - int conditions = sess.waitForCondition(ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA | ChannelCondition.EOF, 120000); - - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - logger.info("Timeout while waiting for data from peer."); - System.exit(2); - } - - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } - - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - logger.info(new String(buffer, 0, len)); - } - - while (stderr.available() > 0) { - /* int len = */stderr.read(buffer); - } - } - - sess.close(); - conn.close(); - - if (!success) { - retry++; - if (retry == MAX_RETRY_LINUX) { - System.exit(2); - } - } - } catch (Exception e) { - retry++; - logger.error("SSH Linux Network test fail with error"); - if (retry == MAX_RETRY_LINUX) { - logger.error("Ssh test failed"); - System.exit(2); - } - } - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/ui/AbstractSeleniumTestCase.java b/test/src-not-used/main/java/com/cloud/test/ui/AbstractSeleniumTestCase.java deleted file mode 100644 index f9e678e2529..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/ui/AbstractSeleniumTestCase.java +++ /dev/null @@ -1,55 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.ui; - -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.openqa.selenium.server.RemoteControlConfiguration; -import org.openqa.selenium.server.SeleniumServer; - -import com.thoughtworks.selenium.DefaultSelenium; - -@RunWith(JUnit4.class) -public abstract class AbstractSeleniumTestCase { - protected static DefaultSelenium selenium; - private static SeleniumServer seleniumServer; - - @BeforeClass - public static void setUp() throws Exception { - System.out.println("*** Starting selenium ... 
***"); - RemoteControlConfiguration seleniumConfig = new RemoteControlConfiguration(); - seleniumConfig.setPort(4444); - seleniumServer = new SeleniumServer(seleniumConfig); - seleniumServer.start(); - - String host = System.getProperty("myParam", "localhost"); - selenium = createSeleniumClient("http://" + host + ":" + "8080/client/"); - selenium.start(); - System.out.println("*** Started selenium ***"); - } - - @AfterClass - public static void tearDown() throws Exception { - selenium.stop(); - } - - protected static DefaultSelenium createSeleniumClient(String url) throws Exception { - return new DefaultSelenium("localhost", 4444, "*firefox", url); - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/ui/AddAndDeleteAISO.java b/test/src-not-used/main/java/com/cloud/test/ui/AddAndDeleteAISO.java deleted file mode 100644 index 33eb3089265..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/ui/AddAndDeleteAISO.java +++ /dev/null @@ -1,127 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.ui; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import org.junit.Test; - -import com.thoughtworks.selenium.SeleniumException; - -public class AddAndDeleteAISO extends AbstractSeleniumTestCase { - - @Test - public void testAddAndDeleteISO() throws Exception { - try { - selenium.open("/client/"); - selenium.type("account_username", "admin"); - selenium.type("account_password", "password"); - selenium.click("loginbutton"); - Thread.sleep(3000); - assertTrue(selenium.isTextPresent("admin")); - selenium.click("//div[@id='leftmenu_templates']/div"); - selenium.click("//div[@id='leftmenu_submenu_my_iso']/div/div[2]"); - Thread.sleep(3000); - selenium.click("label"); - - selenium.type("add_iso_name", "abc"); - selenium.type("add_iso_display_text", "abc"); - String iso_url = System.getProperty("add_iso_url", "http://10.91.28.6/ISO/Fedora-11-i386-DVD.iso"); - selenium.type("add_iso_url", iso_url); - String iso_zone = System.getProperty("add_iso_zone", "All Zones"); - selenium.select("add_iso_zone", "label=" + iso_zone); - String iso_os_type = System.getProperty("add_iso_os_type", "Fedora 11"); - selenium.select("add_iso_os_type", "label=" + iso_os_type); - selenium.click("//div[28]/div[11]/button[1]"); - Thread.sleep(3000); - int i = 1; - try { - for (;; i++) { - System.out.println("i= " + i); - selenium.click("//div[" + i + "]/div/div[2]/span/span"); - } - } catch (Exception ex) { - logger.info("[ignored]" - + "error during clicking test on iso: " + e.getLocalizedMessage()); - } - - for (int second = 0;; second++) { - if (second >= 60) - fail("timeout"); - try { - if (selenium.isVisible("//div[@id='after_action_info_container_on_top']")) - break; - } catch (Exception e) { - logger.info("[ignored]" - + "error during visibility test of iso: " + e.getLocalizedMessage()); - } - Thread.sleep(10000); - } - - assertTrue(selenium.isTextPresent("Adding succeeded")); - Thread.sleep(3000); - int status = 1; - while 
(!selenium.isTextPresent("Ready")) { - for (int j = 1; j <= i; j++) - - { - if (selenium.isTextPresent("Ready")) { - status = 0; - break; - } - selenium.click("//div[" + j + "]/div/div[2]/span/span"); - } - if (status == 0) { - break; - } else { - selenium.click("//div[@id='leftmenu_submenu_featured_iso']/div/div[2]"); - Thread.sleep(3000); - selenium.click("//div[@id='leftmenu_submenu_my_iso']/div/div[2]"); - Thread.sleep(3000); - } - - } - selenium.click("link=Delete ISO"); - selenium.click("//div[28]/div[11]/button[1]"); - for (int second = 0;; second++) { - if (second >= 60) - fail("timeout"); - try { - if (selenium.isVisible("after_action_info_container_on_top")) - break; - } catch (Exception e) { - logger.info("[ignored]" - + "error checking visibility after test completion for iso: " + e.getLocalizedMessage()); - } - Thread.sleep(1000); - } - - assertTrue(selenium.isTextPresent("Delete ISO action succeeded")); - selenium.click("main_logout"); - selenium.waitForPageToLoad("30000"); - assertTrue(selenium.isTextPresent("Welcome to Management Console")); - - } catch (SeleniumException ex) { - - System.err.println(ex.getMessage()); - fail(ex.getMessage()); - - throw ex; - } - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/ui/AddAndDeleteATemplate.java b/test/src-not-used/main/java/com/cloud/test/ui/AddAndDeleteATemplate.java deleted file mode 100644 index 12dc9854112..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/ui/AddAndDeleteATemplate.java +++ /dev/null @@ -1,126 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.ui; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import org.junit.Test; - -import com.thoughtworks.selenium.SeleniumException; - -public class AddAndDeleteATemplate extends AbstractSeleniumTestCase { - - @Test - public void testAddAndDeleteTemplate() throws Exception { - try { - selenium.open("/client/"); - selenium.type("account_username", "admin"); - selenium.type("account_password", "password"); - selenium.click("loginbutton"); - Thread.sleep(3000); - assertTrue(selenium.isTextPresent("admin")); - selenium.click("//div[@id='leftmenu_templates']/div"); - selenium.click("//div[@id='leftmenu_submenu_my_template']/div/div[2]"); - Thread.sleep(3000); - selenium.click("label"); - selenium.type("add_template_name", "abc"); - selenium.type("add_template_display_text", "abc"); - String template_url = - System.getProperty("add_template_url", "http://10.91.28.6/templates/centos53-x86_64/latest/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2"); - selenium.type("add_template_url", template_url); - String template_zone = System.getProperty("add_template_zone", "All Zones"); - selenium.select("add_template_zone", "label=" + template_zone); - String template_os_type = System.getProperty("add_template_os_type", "CentOS 5.3 (32-bit)"); - selenium.select("add_template_os_type", "label=" + template_os_type); - selenium.click("//div[28]/div[11]/button[1]"); - Thread.sleep(3000); - int i = 1; - try { - for (;; i++) { - System.out.println("i= " + i); - selenium.click("//div[" + i + 
"]/div/div[2]/span/span"); - } - } catch (Exception ex) { - logger.info("[ignored]" - + "error during clicking test on template: " + ex.getLocalizedMessage()); - } - - for (int second = 0;; second++) { - if (second >= 60) - fail("timeout"); - try { - if (selenium.isVisible("//div[@id='after_action_info_container_on_top']")) - break; - } catch (Exception e) { - logger.info("[ignored]" - + "error during visibility test of template: " + e.getLocalizedMessage()); - } - Thread.sleep(10000); - } - - assertTrue(selenium.isTextPresent("Adding succeeded")); - Thread.sleep(3000); - int status = 1; - while (!selenium.isTextPresent("Ready")) { - for (int j = 1; j <= i; j++) - - { - if (selenium.isTextPresent("Ready")) { - status = 0; - break; - } - selenium.click("//div[" + j + "]/div/div[2]/span/span"); - } - if (status == 0) { - break; - } else { - selenium.click("//div[@id='leftmenu_submenu_featured_template']/div/div[2]"); - Thread.sleep(3000); - selenium.click("//div[@id='leftmenu_submenu_my_template']/div/div[2]"); - Thread.sleep(3000); - } - - } - selenium.click("link=Delete Template"); - selenium.click("//div[28]/div[11]/button[1]"); - for (int second = 0;; second++) { - if (second >= 60) - fail("timeout"); - try { - if (selenium.isVisible("after_action_info_container_on_top")) - break; - } catch (Exception e) { - logger.info("[ignored]" - + "error checking visibility after test completion for template: " + e.getLocalizedMessage()); - } - Thread.sleep(1000); - } - - assertTrue(selenium.isTextPresent("Delete Template action succeeded")); - selenium.click("main_logout"); - selenium.waitForPageToLoad("30000"); - assertTrue(selenium.isTextPresent("Welcome to Management Console")); - } catch (SeleniumException ex) { - - System.err.println(ex.getMessage()); - fail(ex.getMessage()); - - throw ex; - } - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/ui/UIScenarioTest.java b/test/src-not-used/main/java/com/cloud/test/ui/UIScenarioTest.java deleted file mode 100644 
index 3ba7be90953..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/ui/UIScenarioTest.java +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.ui; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import org.junit.Test; - -import com.thoughtworks.selenium.SeleniumException; - -public class UIScenarioTest extends AbstractSeleniumTestCase { - - @Test - public void testLoginStartStopVMScenario() throws Exception { - try { - selenium.open("/client/"); - selenium.type("account_username", "admin"); - selenium.type("account_password", "password"); - selenium.click("loginbutton"); - Thread.sleep(3000); - assertTrue(selenium.isTextPresent("admin")); - selenium.click("//div[@id='leftmenu_instances']/div"); - selenium.click("//div[@id='leftmenu_instances_stopped_instances']/div/span"); - - Thread.sleep(3000); - selenium.click("//div[@id='midmenu_startvm_link']/div/div[2]"); - selenium.click("//div[39]/div[11]/button[1]"); - - for (int second = 0;; second++) { - if (second >= 60) - fail("timeout"); - try { - if (selenium.isVisible("//div/p[@id='after_action_info']")) - break; - } catch (Exception e) { - logger.info("[ignored]" - + 
"error during visibility test after start vm: " + e.getLocalizedMessage()); - } - Thread.sleep(10000); - } - assertTrue(selenium.isTextPresent("Start Instance action succeeded")); - selenium.click("//div[@id='leftmenu_instances_running_instances']/div/span"); - - Thread.sleep(3000); - selenium.click("//div[@id='midmenu_stopvm_link']/div/div[2]"); - selenium.click("//div[39]/div[11]/button[1]"); - for (int second = 0;; second++) { - if (second >= 60) - fail("timeout"); - try { - if (selenium.isVisible("//div/p[@id='after_action_info']")) - break; - } catch (Exception e) { - logger.info("[ignored]" - + "error during visibility test after stop vm: " + e.getLocalizedMessage()); - } - Thread.sleep(10000); - } - - assertTrue(selenium.isTextPresent("Stop Instance action succeeded")); - selenium.click("main_logout"); - selenium.waitForPageToLoad("30000"); - assertTrue(selenium.isTextPresent("Welcome to Management Console")); - - } catch (SeleniumException ex) { - fail(ex.getMessage()); - System.err.println(ex.getMessage()); - throw ex; - } - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/ConsoleProxy.java b/test/src-not-used/main/java/com/cloud/test/utils/ConsoleProxy.java deleted file mode 100644 index 0d2cb8644f6..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/ConsoleProxy.java +++ /dev/null @@ -1,108 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.utils; - -import java.io.BufferedReader; -import java.io.IOException; - - -import com.cloud.utils.script.OutputInterpreter; -import com.cloud.utils.script.Script; - -public class ConsoleProxy implements Runnable { - public static String proxyIp; - private String command; - private int connectionsMade; - private long responseTime; - - public ConsoleProxy(String port, String sid, String host) { - this.command = "https://" + proxyIp + ".realhostip.com:8000/getscreen?w=100&h=75&host=" + host + "&port=" + port + "&sid=" + sid; - logger.info("Command for a console proxy is " + this.command); - this.connectionsMade = 0; - this.responseTime = 0; - } - - public int getConnectionsMade() { - return this.connectionsMade; - } - - public long getResponseTime() { - return this.responseTime; - } - - @Override - public void run() { - while (true) { - - Script myScript = new Script("wget"); - myScript.add(command); - myScript.execute(); - long begin = System.currentTimeMillis(); - WgetInt process = new WgetInt(); - String response = myScript.execute(process); - long end = process.getEnd(); - if (response != null) { - logger.info("Content lenght is incorrect: " + response); - } - - long duration = (end - begin); - this.connectionsMade++; - this.responseTime = this.responseTime + duration; - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - logger.debug("[ignored] interrupted."); - } - - } - } - - public class WgetInt extends OutputInterpreter { - private long end; - - public long getEnd() { - return end; - } - - public void 
setEnd(long end) { - this.end = end; - } - - @Override - public String interpret(BufferedReader reader) throws IOException { - // TODO Auto-generated method stub - end = System.currentTimeMillis(); - String status = null; - String line = null; - while ((line = reader.readLine()) != null) { - int index = line.indexOf("Length:"); - if (index == -1) { - continue; - } else { - int index1 = line.indexOf("Length: 1827"); - if (index1 == -1) { - return status; - } else - status = line; - } - - } - return status; - } - - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/IpSqlGenerator.java b/test/src-not-used/main/java/com/cloud/test/utils/IpSqlGenerator.java deleted file mode 100644 index e414f12df50..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/IpSqlGenerator.java +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.utils; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.util.StringTokenizer; - -public class IpSqlGenerator { - public static void main(String[] args) { - try { - if (args.length != 5) { - System.out.println("Usage -- generate-ip.sh "); - System.out.println("Example -- generate-ip.sh public 192.168.1.1 192.168.1.255 1 1"); - System.out.println(" will generate ips ranging from public ips 192.168.1.1 to 192.168.1.255 for dc 1 and pod 1"); - return; - } - - String type = args[0]; - - StringTokenizer st = new StringTokenizer(args[1], "."); - int ipS1 = Integer.parseInt(st.nextToken()); - int ipS2 = Integer.parseInt(st.nextToken()); - int ipS3 = Integer.parseInt(st.nextToken()); - int ipS4 = Integer.parseInt(st.nextToken()); - - st = new StringTokenizer(args[2], "."); - int ipE1 = Integer.parseInt(st.nextToken()); - int ipE2 = Integer.parseInt(st.nextToken()); - int ipE3 = Integer.parseInt(st.nextToken()); - int ipE4 = Integer.parseInt(st.nextToken()); - - String dcId = args[3]; - String podId = args[4]; - - if (type.equals("private")) { - FileOutputStream fs = new FileOutputStream(new File("private-ips.sql")); - DataOutputStream out = new DataOutputStream(fs); - for (int i = ipS1; i <= ipE1; i++) { - for (int j = ipS2; j <= ipE2; j++) { - for (int k = ipS3; k <= ipE3; k++) { - for (int l = ipS4; l <= ipE4; l++) { - out.writeBytes("INSERT INTO `vmops`.`dc_ip_address_alloc` (ip_address, data_center_id, pod_id) VALUES ('" + i + "." + j + "." + k + "." 
+ - l + "'," + dcId + "," + podId + ");\r\n"); - } - } - } - } - out.writeBytes("\r\n"); - out.flush(); - out.close(); - } else { - FileOutputStream fs = new FileOutputStream(new File("public-ips.sql")); - DataOutputStream out = new DataOutputStream(fs); - for (int i = ipS1; i <= ipE1; i++) { - for (int j = ipS2; j <= ipE2; j++) { - for (int k = ipS3; k <= ipE3; k++) { - for (int l = ipS4; l <= ipE4; l++) { - out.writeBytes("INSERT INTO `vmops`.`user_ip_address` (ip_address, data_center_id) VALUES ('" + i + "." + j + "." + k + "." + l + "'," + - dcId + ");\r\n"); - } - } - } - } - out.writeBytes("\r\n"); - out.flush(); - out.close(); - } - } catch (Exception e) { - logger.info("[ignored]" - + "error during ip insert generator: " + e.getLocalizedMessage()); - } - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/ProxyLoadTemp.java b/test/src-not-used/main/java/com/cloud/test/utils/ProxyLoadTemp.java deleted file mode 100644 index 6fe3a695d38..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/ProxyLoadTemp.java +++ /dev/null @@ -1,110 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.utils; - -import java.io.BufferedReader; -import java.io.FileReader; -import java.util.ArrayList; - - -public class ProxyLoadTemp { - public static int numThreads = 0; - public static ArrayList proxyList = new ArrayList(); - public static long begin; - public static long end; - public static long sum = 0; - - public ProxyLoadTemp() { - } - - public static void main(String[] args) { - begin = System.currentTimeMillis(); - Runtime.getRuntime().addShutdownHook(new ShutdownThread(new ProxyLoadTemp())); - ConsoleProxy.proxyIp = "172-16-1-101"; - - try { - BufferedReader consoleInput = new BufferedReader(new FileReader("console.input")); - boolean eof = false; - logger.info("Started reading file"); - while (!eof) { - String line = consoleInput.readLine(); - logger.info("Line is " + line); - if (line == null) { - logger.info("Line " + numThreads + " is null"); - eof = true; - } else { - String[] result = null; - try { - logger.info("Starting parsing line " + line); - result = parseLine(line, "[,]"); - logger.info("Line retrieved from the file is " + result[0] + " " + result[1] + " " + result[2]); - ConsoleProxy proxy = new ConsoleProxy(result[0], result[1], result[2]); - proxyList.add(proxy); - new Thread(proxy).start(); - numThreads++; - - } catch (Exception ex) { - logger.warn(ex); - } - } - - } - } catch (Exception e) { - logger.warn(e); - } - - } - - public static class ShutdownThread extends Thread { - ProxyLoadTemp temp; - - public ShutdownThread(ProxyLoadTemp temp) { - this.temp = temp; - } - - @Override - public void run() { - logger.info("Program was running in " + numThreads + " threads"); - - for (int j = 0; j < proxyList.size(); j++) { - long av = 0; - if (proxyList.get(j).getConnectionsMade() != 0) { - av = proxyList.get(j).getResponseTime() / proxyList.get(j).getConnectionsMade(); - } - logger.info("Information for " + j + " thread: Number of requests sent is " + proxyList.get(j).getConnectionsMade() + ". 
Average response time is " + - av + " milliseconds"); - sum = sum + av; - - } - ProxyLoadTemp.end = System.currentTimeMillis(); - logger.info("Summary for all" + numThreads + " threads: Average response time is " + sum / numThreads + " milliseconds"); - logger.info("Test was running for " + (ProxyLoadTemp.end - ProxyLoadTemp.begin) / 1000 + " seconds"); - } - } - - public static String[] parseLine(String line, String del) throws Exception { - String del1 = del.substring(1, del.length() - 1); - if (line.contains(del1) != true) { - throw new Exception(); - } else { - String[] token = line.split(del); - return token; - } - - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/SignEC2.java b/test/src-not-used/main/java/com/cloud/test/utils/SignEC2.java deleted file mode 100644 index 97d674c3a36..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/SignEC2.java +++ /dev/null @@ -1,144 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.utils; - -import java.io.FileInputStream; -import java.io.IOException; -import java.net.URLEncoder; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.StringTokenizer; -import java.util.TreeMap; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -public class SignEC2 { - public static String url; - public static String secretkey; - public static String host; - public static String port; - public static String command; - public static String accessPoint; - protected Logger logger = LogManager.getLogger(getClass()); - - public static void main(String[] args) { - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - - if (arg.equals("-u")) { - url = iter.next(); - } - } - - Properties prop = new Properties(); - try { - prop.load(new FileInputStream("../conf/tool.properties")); - } catch (IOException ex) { - logger.error("Error reading from ../conf/tool.properties", ex); - System.exit(2); - } - - host = prop.getProperty("host"); - secretkey = prop.getProperty("secretkey"); - port = prop.getProperty("port"); - - if (host == null) { - logger.info("Please set host in tool.properties file"); - System.exit(1); - } - - if (port == null) { - logger.info("Please set port in tool.properties file"); - System.exit(1); - } - - if (url == null) { - logger.info("Please specify url with -u option"); - System.exit(1); - } - - if (secretkey == null) { - logger.info("Please set secretkey in tool.properties file"); - System.exit(1); - } - - if (prop.get("apikey") == null) { - logger.info("Please set apikey in tool.properties file"); - System.exit(1); - } - - if (prop.get("accesspoint") == null) { - logger.info("Please set apikey in tool.properties file"); - System.exit(1); - } - - TreeMap param = new TreeMap(); - 
- String req = "GET\n" + host + ":" + prop.getProperty("port") + "\n/" + prop.getProperty("accesspoint") + "\n"; - String temp = ""; - param.put("AWSAccessKeyId", prop.getProperty("apikey")); - param.put("Expires", prop.getProperty("expires")); - param.put("SignatureMethod", "HmacSHA1"); - param.put("SignatureVersion", "2"); - param.put("Version", prop.getProperty("version")); - param.put("id", "1"); - - StringTokenizer str1 = new StringTokenizer(url, "&"); - while (str1.hasMoreTokens()) { - String newEl = str1.nextToken(); - StringTokenizer str2 = new StringTokenizer(newEl, "="); - String name = str2.nextToken(); - String value = str2.nextToken(); - param.put(name, value); - } - - //sort url hash map by key - Set c = param.entrySet(); - Iterator it = c.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - try { - temp = temp + key + "=" + URLEncoder.encode(value, "UTF-8") + "&"; - } catch (Exception ex) { - logger.error("Unable to set parameter " + value + " for the command " + param.get("command")); - } - - } - temp = temp.substring(0, temp.length() - 1); - String requestToSign = req + temp; - String signature = UtilsForTest.signRequest(requestToSign, secretkey); - String encodedSignature = ""; - try { - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - } catch (Exception ex) { - logger.error(ex); - } - String url = "http://" + host + ":" + prop.getProperty("port") + "/" + prop.getProperty("accesspoint") + "?" 
+ temp + "&Signature=" + encodedSignature; - logger.info("Url is " + url); - - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/SignRequest.java b/test/src-not-used/main/java/com/cloud/test/utils/SignRequest.java deleted file mode 100644 index 95fd7b29374..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/SignRequest.java +++ /dev/null @@ -1,112 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.utils; - -import java.net.URLEncoder; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.StringTokenizer; -import java.util.TreeMap; - -public class SignRequest { - public static String url; - public static String apikey; - public static String secretkey; - public static String command; - - public static void main(String[] args) { - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - if (arg.equals("-a")) { - apikey = iter.next(); - - } - if (arg.equals("-u")) { - url = iter.next(); - } - - if (arg.equals("-s")) { - secretkey = iter.next(); - } - } - - if (url == null) { - System.out.println("Please specify url with -u option. Example: -u \"command=listZones&id=1\""); - System.exit(1); - } - - if (apikey == null) { - System.out.println("Please specify apikey with -a option"); - System.exit(1); - } - - if (secretkey == null) { - System.out.println("Please specify secretkey with -s option"); - System.exit(1); - } - - TreeMap param = new TreeMap(); - - String temp = ""; - param.put("apikey", apikey); - - StringTokenizer str1 = new StringTokenizer(url, "&"); - while (str1.hasMoreTokens()) { - String newEl = str1.nextToken(); - StringTokenizer str2 = new StringTokenizer(newEl, "="); - String name = str2.nextToken(); - String value = str2.nextToken(); - param.put(name, value); - } - - //sort url hash map by key - Set c = param.entrySet(); - Iterator it = c.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - try { - temp = temp + key + "=" + URLEncoder.encode(value, "UTF-8") + "&"; - } catch (Exception ex) { - System.out.println("Unable to set parameter " + value + " for the command " + param.get("command")); - } - - } - temp = temp.substring(0, temp.length() - 
1); - String requestToSign = temp.toLowerCase(); - System.out.println("After sorting: " + requestToSign); - String signature = UtilsForTest.signRequest(requestToSign, secretkey); - System.out.println("After Base64 encoding: " + signature); - String encodedSignature = ""; - try { - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - } catch (Exception ex) { - System.out.println(ex); - } - System.out.println("After UTF8 encoding: " + encodedSignature); - String url = temp + "&signature=" + encodedSignature; - System.out.println("After sort and add signature: " + url); - - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/SqlDataGenerator.java b/test/src-not-used/main/java/com/cloud/test/utils/SqlDataGenerator.java deleted file mode 100644 index 17618216894..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/SqlDataGenerator.java +++ /dev/null @@ -1,49 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.test.utils; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.util.Formatter; - -public class SqlDataGenerator { - public static void main(String[] args) { - try { - FileOutputStream fs = new FileOutputStream(new File("out.txt")); - - DataOutputStream out = new DataOutputStream(fs); - - for (int i = 20; i < 171; i++) { - out.writeBytes("INSERT INTO `vmops`.`dc_ip_address_alloc` (ip_address, data_center_id, pod_id) VALUES ('192.168.2." + i + "',1,1);\r\n"); - } - out.writeBytes("\r\n"); - for (int i = 1; i < 10000; i++) { - StringBuilder imagePath = new StringBuilder(); - Formatter formatter = new Formatter(imagePath); - formatter.format("%04x", i); - out.writeBytes("INSERT INTO `vmops`.`dc_vnet_alloc` (vnet, data_center_id) VALUES ('" + imagePath.toString() + "',1);\r\n"); - } - - out.flush(); - out.close(); - } catch (Exception e) { - logger.info("[ignored]" - + "error during sql generation: " + e.getLocalizedMessage()); - } - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/SubmitCert.java b/test/src-not-used/main/java/com/cloud/test/utils/SubmitCert.java deleted file mode 100644 index 088bdee3f0d..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/SubmitCert.java +++ /dev/null @@ -1,199 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.utils; - -import java.io.BufferedReader; -import java.io.FileInputStream; -import java.io.FileReader; -import java.io.IOException; -import java.net.URLEncoder; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.StringTokenizer; -import java.util.TreeMap; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -public class SubmitCert { - public static String url = "Action=SetCertificate"; - public static String secretKey; - public static String apiKey; - public static String host; - public static String port; - public static String command; - public static String accessPoint; - public static String signatureMethod; - public static String fileName = "tool.properties"; - public static String certFileName; - public static String cert; - protected Logger logger = LogManager.getLogger(getClass()); - - public static void main(String[] args) { - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - - if (arg.equals("-c")) { - certFileName = iter.next(); - } - - if (arg.equals("-s")) { - secretKey = iter.next(); - } - - if (arg.equals("-a")) { - apiKey = iter.next(); - } - - if (arg.equals("-action")) { - url = "Action=" + 
iter.next(); - } - } - - Properties prop = new Properties(); - try { - prop.load(new FileInputStream("conf/tool.properties")); - } catch (IOException ex) { - logger.error("Error reading from conf/tool.properties", ex); - System.exit(2); - } - - host = prop.getProperty("host"); - port = prop.getProperty("port"); - - if (url.equals("Action=SetCertificate") && certFileName == null) { - logger.error("Please set path to certificate (including file name) with -c option"); - System.exit(1); - } - - if (secretKey == null) { - logger.error("Please set secretkey with -s option"); - System.exit(1); - } - - if (apiKey == null) { - logger.error("Please set apikey with -a option"); - System.exit(1); - } - - if (host == null) { - logger.error("Please set host in tool.properties file"); - System.exit(1); - } - - if (port == null) { - logger.error("Please set port in tool.properties file"); - System.exit(1); - } - - TreeMap param = new TreeMap(); - - String req = "GET\n" + host + ":" + prop.getProperty("port") + "\n/" + prop.getProperty("accesspoint") + "\n"; - String temp = ""; - - if (certFileName != null) { - cert = readCert(certFileName); - param.put("cert", cert); - } - - param.put("AWSAccessKeyId", apiKey); - param.put("Expires", prop.getProperty("expires")); - param.put("SignatureMethod", prop.getProperty("signaturemethod")); - param.put("SignatureVersion", "2"); - param.put("Version", prop.getProperty("version")); - - StringTokenizer str1 = new StringTokenizer(url, "&"); - while (str1.hasMoreTokens()) { - String newEl = str1.nextToken(); - StringTokenizer str2 = new StringTokenizer(newEl, "="); - String name = str2.nextToken(); - String value = str2.nextToken(); - param.put(name, value); - } - - //sort url hash map by key - Set c = param.entrySet(); - Iterator it = c.iterator(); - while (it.hasNext()) { - Map.Entry me = (Map.Entry)it.next(); - String key = (String)me.getKey(); - String value = (String)me.getValue(); - try { - temp = temp + key + "=" + 
URLEncoder.encode(value, "UTF-8") + "&"; - } catch (Exception ex) { - logger.error("Unable to set parameter " + value + " for the command " + param.get("command"), ex); - } - - } - temp = temp.substring(0, temp.length() - 1); - String requestToSign = req + temp; - String signature = UtilsForTest.signRequest(requestToSign, secretKey); - String encodedSignature = ""; - try { - encodedSignature = URLEncoder.encode(signature, "UTF-8"); - } catch (Exception ex) { - ex.printStackTrace(); - } - - String url = "http://" + host + ":" + prop.getProperty("port") + "/" + prop.getProperty("accesspoint") + "?" + temp + "&Signature=" + encodedSignature; - logger.info("Sending request with url: " + url + "\n"); - sendRequest(url); - } - - public static String readCert(String filePath) { - try { - StringBuffer fileData = new StringBuffer(1000); - BufferedReader reader = new BufferedReader(new FileReader(filePath)); - char[] buf = new char[1024]; - int numRead = 0; - while ((numRead = reader.read(buf)) != -1) { - String readData = String.valueOf(buf, 0, numRead); - fileData.append(readData); - buf = new char[1024]; - } - reader.close(); - return fileData.toString(); - } catch (Exception ex) { - logger.error(ex); - return null; - } - } - - public static void sendRequest(String url) { - try { - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - String is = method.getResponseBodyAsString(); - logger.info("Response code " + responseCode + ": " + is); - } catch (Exception ex) { - ex.printStackTrace(); - } - - } - -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/TestClient.java b/test/src-not-used/main/java/com/cloud/test/utils/TestClient.java deleted file mode 100644 index c6d4a93b9b2..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/TestClient.java +++ /dev/null @@ -1,386 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license 
agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.utils; - -import java.io.InputStream; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Random; - -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.HttpMethod; -import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.log4j.NDC; - -import com.trilead.ssh2.ChannelCondition; -import com.trilead.ssh2.Connection; -import com.trilead.ssh2.SCPClient; -import com.trilead.ssh2.Session; - -public class TestClient { - private static long sleepTime = 180000L; // default 0 - private static boolean cleanUp = true; - protected Logger logger = LogManager.getLogger(getClass()); - private static boolean repeat = true; - private static int numOfUsers = 0; - private static String[] users = null; - private static boolean internet = true; - - private static final int MAX_RETRY_LINUX = 5; - private static final int MAX_RETRY_WIN = 10; - - public static void main(String[] args) { - String host = "http://localhost"; - String port = "8080"; - String testUrl = "/client/test"; - int numThreads = 1; - - try { - // Parameters - List argsList = Arrays.asList(args); - Iterator iter = 
argsList.iterator(); - while (iter.hasNext()) { - String arg = iter.next(); - // host - if (arg.equals("-h")) { - host = "http://" + iter.next(); - } - - if (arg.equals("-p")) { - port = iter.next(); - } - - if (arg.equals("-t")) { - numThreads = Integer.parseInt(iter.next()); - } - - if (arg.equals("-s")) { - sleepTime = Long.parseLong(iter.next()); - } - - if (arg.equals("-c")) { - cleanUp = Boolean.parseBoolean(iter.next()); - if (!cleanUp) - sleepTime = 0L; // no need to wait if we don't ever cleanup - } - - if (arg.equals("-r")) { - repeat = Boolean.parseBoolean(iter.next()); - } - - if (arg.equals("-u")) { - numOfUsers = Integer.parseInt(iter.next()); - } - - if (arg.equals("-i")) { - internet = Boolean.parseBoolean(iter.next()); - } - } - - final String server = host + ":" + port + testUrl; - logger.info("Starting test against server: " + server + " with " + numThreads + " thread(s)"); - if (cleanUp) - logger.info("Clean up is enabled, each test will wait " + sleepTime + " ms before cleaning up"); - - if (numOfUsers > 0) { - logger.info("Pre-generating users for test of size : " + numOfUsers); - users = new String[numOfUsers]; - Random ran = new Random(); - for (int i = 0; i < numOfUsers; i++) { - users[i] = Math.abs(ran.nextInt()) + "-user"; - } - } - - for (int i = 0; i < numThreads; i++) { - new Thread(new Runnable() { - @Override - public void run() { - do { - String username = null; - try { - long now = System.currentTimeMillis(); - Random ran = new Random(); - if (users != null) { - username = users[Math.abs(ran.nextInt()) % numOfUsers]; - } else { - username = Math.abs(ran.nextInt()) + "-user"; - } - NDC.push(username); - - String url = server + "?email=" + username + "&password=" + username + "&command=deploy"; - logger.info("Launching test for user: " + username + " with url: " + url); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - int responseCode = client.executeMethod(method); - boolean success = false; - 
String reason = null; - if (responseCode == 200) { - if (internet) { - logger.info("Deploy successful...waiting 5 minute before SSH tests"); - Thread.sleep(300000L); // Wait 60 seconds so the linux VM can boot up. - - logger.info("Begin Linux SSH test"); - reason = sshTest(method.getResponseHeader("linuxIP").getValue()); - - if (reason == null) { - logger.info("Linux SSH test successful"); - logger.info("Begin Windows SSH test"); - reason = sshWinTest(method.getResponseHeader("windowsIP").getValue()); - } - } - if (reason == null) { - if (internet) { - logger.info("Windows SSH test successful"); - } else { - logger.info("deploy test successful....now cleaning up"); - if (cleanUp) { - logger.info("Waiting " + sleepTime + " ms before cleaning up vms"); - Thread.sleep(sleepTime); - } else { - success = true; - } - } - if (users == null) { - logger.info("Sending cleanup command"); - url = server + "?email=" + username + "&password=" + username + "&command=cleanup"; - } else { - logger.info("Sending stop DomR / destroy VM command"); - url = server + "?email=" + username + "&password=" + username + "&command=stopDomR"; - } - method = new GetMethod(url); - responseCode = client.executeMethod(method); - if (responseCode == 200) { - success = true; - } else { - reason = method.getStatusText(); - } - } else { - // Just stop but don't destroy the VMs/Routers - logger.info("SSH test failed with reason '" + reason + "', stopping VMs"); - url = server + "?email=" + username + "&password=" + username + "&command=stop"; - responseCode = client.executeMethod(new GetMethod(url)); - } - } else { - // Just stop but don't destroy the VMs/Routers - reason = method.getStatusText(); - logger.info("Deploy test failed with reason '" + reason + "', stopping VMs"); - url = server + "?email=" + username + "&password=" + username + "&command=stop"; - client.executeMethod(new GetMethod(url)); - } - - if (success) { - logger.info("***** Completed test for user : " + username + " in " + 
((System.currentTimeMillis() - now) / 1000L) + " seconds"); - } else { - logger.info("##### FAILED test for user : " + username + " in " + ((System.currentTimeMillis() - now) / 1000L) + - " seconds with reason : " + reason); - } - } catch (Exception e) { - logger.warn("Error in thread", e); - try { - HttpClient client = new HttpClient(); - String url = server + "?email=" + username + "&password=" + username + "&command=stop"; - client.executeMethod(new GetMethod(url)); - } catch (Exception e1) { - logger.info("[ignored]" - + "error while executing last resort stop attempt: " + e1.getLocalizedMessage()); - } - } finally { - NDC.clear(); - } - } while (repeat); - } - }).start(); - } - } catch (Exception e) { - logger.error(e); - } - } - - private static String sshWinTest(String host) { - if (host == null) { - logger.info("Did not receive a host back from test, ignoring win ssh test"); - return null; - } - - // We will retry 5 times before quitting - int retry = 0; - - while (true) { - try { - if (retry > 0) { - logger.info("Retry attempt : " + retry + " ...sleeping 300 seconds before next attempt"); - Thread.sleep(300000); - } - - logger.info("Attempting to SSH into windows host " + host + " with retry attempt: " + retry); - - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); - - logger.info("SSHed successfully into windows host " + host); - boolean success = false; - boolean isAuthenticated = conn.authenticateWithPassword("vmops", "vmops"); - if (isAuthenticated == false) { - return "Authentication failed"; - } - SCPClient scp = new SCPClient(conn); - - scp.put("wget.exe", ""); - - Session sess = conn.openSession(); - logger.info("Executing : wget http://172.16.0.220/dump.bin"); - sess.execCommand("wget http://172.16.0.220/dump.bin && dir dump.bin"); - - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); - - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() 
== 0)) { - int conditions = sess.waitForCondition(ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA | ChannelCondition.EOF, 120000); - - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - logger.info("Timeout while waiting for data from peer."); - return null; - } - - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } - - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - logger.info(new String(buffer, 0, len)); - } - - while (stderr.available() > 0) { - int len = stderr.read(buffer); - } - } - sess.close(); - conn.close(); - - if (success) { - return null; - } else { - retry++; - if (retry == MAX_RETRY_WIN) { - return "SSH Windows Network test fail"; - } - } - } catch (Exception e) { - retry++; - if (retry == MAX_RETRY_WIN) { - return "SSH Windows Network test fail with error " + e.getMessage(); - } - } - } - } - - private static String sshTest(String host) { - if (host == null) { - logger.info("Did not receive a host back from test, ignoring ssh test"); - return null; - } - - // We will retry 5 times before quitting - int retry = 0; - - while (true) { - try { - if (retry > 0) { - logger.info("Retry attempt : " + retry + " ...sleeping 120 seconds before next attempt"); - Thread.sleep(120000); - } - - logger.info("Attempting to SSH into linux host " + host + " with retry attempt: " + retry); - - Connection conn = new Connection(host); - conn.connect(null, 60000, 60000); - - logger.info("SSHed successfully into linux host " + host); - - boolean isAuthenticated = conn.authenticateWithPassword("root", "password"); - - if (isAuthenticated == false) { - return "Authentication failed"; - } - boolean success = false; - Session sess = conn.openSession(); - logger.info("Executing : wget http://172.16.0.220/dump.bin"); - sess.execCommand("wget http://172.16.0.220/dump.bin 
&& ls -al dump.bin"); - - InputStream stdout = sess.getStdout(); - InputStream stderr = sess.getStderr(); - - byte[] buffer = new byte[8192]; - while (true) { - if ((stdout.available() == 0) && (stderr.available() == 0)) { - int conditions = sess.waitForCondition(ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA | ChannelCondition.EOF, 120000); - - if ((conditions & ChannelCondition.TIMEOUT) != 0) { - logger.info("Timeout while waiting for data from peer."); - return null; - } - - if ((conditions & ChannelCondition.EOF) != 0) { - if ((conditions & (ChannelCondition.STDOUT_DATA | ChannelCondition.STDERR_DATA)) == 0) { - break; - } - } - } - - while (stdout.available() > 0) { - success = true; - int len = stdout.read(buffer); - if (len > 0) // this check is somewhat paranoid - logger.info(new String(buffer, 0, len)); - } - - while (stderr.available() > 0) { - int len = stderr.read(buffer); - } - } - - sess.close(); - conn.close(); - - if (success) { - return null; - } else { - retry++; - if (retry == MAX_RETRY_LINUX) { - return "SSH Linux Network test fail"; - } - } - } catch (Exception e) { - retry++; - if (retry == MAX_RETRY_LINUX) { - return "SSH Linux Network test fail with error " + e.getMessage(); - } - } - } - } -} diff --git a/test/src-not-used/main/java/com/cloud/test/utils/UtilsForTest.java b/test/src-not-used/main/java/com/cloud/test/utils/UtilsForTest.java deleted file mode 100644 index 78ba001bb9c..00000000000 --- a/test/src-not-used/main/java/com/cloud/test/utils/UtilsForTest.java +++ /dev/null @@ -1,210 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.test.utils; - -import java.io.InputStream; -import java.math.BigInteger; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.crypto.Mac; -import javax.crypto.spec.SecretKeySpec; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.commons.codec.binary.Base64; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -import com.cloud.utils.exception.CloudRuntimeException; - -public class UtilsForTest { - - private static DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - - public static boolean verifyTags(Map params) { - boolean result = true; - for (String value : params.keySet()) { - if (params.get(value) == null) { - result = false; - } - } - return result; - } - - public static boolean verifyTagValues(Map params, Map pattern) { - boolean result = true; - - if (pattern != null) { - for (String value : pattern.keySet()) { - if (!pattern.get(value).equals(params.get(value))) { - result = false; - System.out.println("Tag " + value + " has " + params.get(value) + " while expected value is: " + pattern.get(value)); - } - } - } - return result; - } - - public static Map parseXML(InputStream is, String[] tagNames) { - Map returnValues = new HashMap(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = 
docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - System.out.println("no " + tagNames[i] + " tag in the response"); - returnValues.put(tagNames[i], null); - } else { - returnValues.put(tagNames[i], targetNodes.item(0).getTextContent()); - } - } - } catch (Exception ex) { - System.out.println("error processing XML"); - ex.printStackTrace(); - } - return returnValues; - } - - public static ArrayList> parseMulXML(InputStream is, String[] tagNames) { - ArrayList> returnValues = new ArrayList>(); - - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - System.out.println("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - for (int j = 0; j < targetNodes.getLength(); j++) { - HashMap valueList = new HashMap(); - Node node = targetNodes.item(j); - //parse child nodes - NodeList child = node.getChildNodes(); - for (int c = 0; c < node.getChildNodes().getLength(); c++) { - child.item(c).getNodeName(); - valueList.put(child.item(c).getNodeName(), child.item(c).getTextContent()); - } - returnValues.add(valueList); - } - - } - } - } catch (Exception ex) { - System.out.println(ex); - } - - return returnValues; - } - - public static String createMD5String(String password) { - MessageDigest md5; - try { - md5 = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new CloudRuntimeException("Error", e); - } - - md5.reset(); - BigInteger pwInt = new BigInteger(1, md5.digest(password.getBytes())); - - // make sure our MD5 hash value is 32 digits long... 
- StringBuffer sb = new StringBuffer(); - String pwStr = pwInt.toString(16); - int padding = 32 - pwStr.length(); - for (int i = 0; i < padding; i++) { - sb.append('0'); - } - sb.append(pwStr); - return sb.toString(); - } - - public static Map getSingleValueFromXML(InputStream is, String[] tagNames) { - Map returnValues = new HashMap(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - System.out.println("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - returnValues.put(tagNames[i], targetNodes.item(0).getTextContent()); - } - } - } catch (Exception ex) { - System.out.println("error processing XML"); - ex.printStackTrace(); - } - return returnValues; - } - - public static Map> getMultipleValuesFromXML(InputStream is, String[] tagNames) { - Map> returnValues = new HashMap>(); - try { - DocumentBuilder docBuilder = factory.newDocumentBuilder(); - Document doc = docBuilder.parse(is); - Element rootElement = doc.getDocumentElement(); - for (int i = 0; i < tagNames.length; i++) { - NodeList targetNodes = rootElement.getElementsByTagName(tagNames[i]); - if (targetNodes.getLength() <= 0) { - System.out.println("no " + tagNames[i] + " tag in XML response...returning null"); - } else { - List valueList = new ArrayList(); - for (int j = 0; j < targetNodes.getLength(); j++) { - Node node = targetNodes.item(j); - valueList.add(node.getTextContent()); - } - returnValues.put(tagNames[i], valueList); - } - } - } catch (Exception ex) { - System.out.println(ex); - } - return returnValues; - } - - public static String signRequest(String request, String key) { - try { - Mac mac = Mac.getInstance("HmacSHA1"); - SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), "HmacSHA1"); - 
mac.init(keySpec); - mac.update(request.getBytes()); - byte[] encryptedBytes = mac.doFinal(); - //System.out.println("HmacSHA1 hash: " + encryptedBytes); - return Base64.encodeBase64String(encryptedBytes); - } catch (Exception ex) { - System.out.println("unable to sign request"); - ex.printStackTrace(); - } - return null; - } - -} diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 9df6bf9efc5..8d28749a637 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -16,11 +16,11 @@ # specific language governing permissions and limitations # under the License. -import os import os.path import sys from xml.dom import minidom from xml.parsers.expat import ExpatError +import difflib ROOT_ADMIN = 'r' @@ -51,6 +51,9 @@ known_categories = { 'VirtualMachine': 'Virtual Machine', 'VM': 'Virtual Machine', 'Vnf': 'Virtual Network Functions', + 'VnfTemplate': 'Virtual Network Functions', + 'GuestSubnet': 'Routing', + 'HypervisorGuestOsNames': 'Guest OS', 'Domain': 'Domain', 'Template': 'Template', 'Iso': 'ISO', @@ -62,96 +65,59 @@ known_categories = { 'StaticNat': 'NAT', 'IpForwarding': 'NAT', 'Host': 'Host', - 'OutOfBand': 'Out-of-band Management', + 'HostTags': 'Host', + 'OutOfBandManagement': 'Out-of-band Management', 'Cluster': 'Cluster', 'Account': 'Account', 'Role': 'Role', 'Snapshot': 'Snapshot', 'User': 'User', + 'UserData': 'User Data', 'Os': 'Guest OS', 'ServiceOffering': 'Service Offering', 'DiskOffering': 'Disk Offering', 'LoadBalancer': 'Load Balancer', - 'SslCert': 'Load Balancer', + 'SslCert': 'SSL Certificates', 'Router': 'Router', - 'SystemVm': 'System VM', 'Configuration': 'Configuration', 'Capabilities': 'Configuration', 'Pod': 'Pod', + 'ManagementNetworkIpRange': 'Pod', 'PublicIpRange': 'Network', 'Zone': 'Zone', 'Vmware' : 'Zone', 'NetworkOffering': 'Network Offering', 'NetworkACL': 'Network ACL', + 'NetworkAclItem': 'Network ACL', 'Network': 'Network', 'CiscoNexus': 'Network', 'OpenDaylight': 'Network', 'createServiceInstance': 
'Network', 'addGloboDnsHost': 'Network', - 'createTungstenFabricProvider': 'Tungsten', - 'listTungstenFabricProviders': 'Tungsten', - 'configTungstenFabricService': 'Tungsten', - 'createTungstenFabricPublicNetwork': 'Tungsten', - 'synchronizeTungstenFabricData': 'Tungsten', - 'addTungstenFabricPolicyRule': 'Tungsten', - 'createTungstenFabricPolicy': 'Tungsten', - 'deleteTungstenFabricPolicy': 'Tungsten', - 'removeTungstenFabricPolicyRule': 'Tungsten', - 'listTungstenFabricTag': 'Tungsten', - 'listTungstenFabricTagType': 'Tungsten', - 'listTungstenFabricPolicy': 'Tungsten', - 'listTungstenFabricPolicyRule': 'Tungsten', - 'listTungstenFabricNetwork': 'Tungsten', - 'listTungstenFabricVm': 'Tungsten', - 'listTungstenFabricNic': 'Tungsten', - 'createTungstenFabricTag': 'Tungsten', - 'createTungstenFabricTagType': 'Tungsten', - 'deleteTungstenFabricTag': 'Tungsten', - 'deleteTungstenFabricTagType': 'Tungsten', - 'applyTungstenFabricPolicy': 'Tungsten', - 'applyTungstenFabricTag': 'Tungsten', - 'removeTungstenFabricTag': 'Tungsten', - 'removeTungstenFabricPolicy': 'Tungsten', - 'createTungstenFabricApplicationPolicySet': 'Tungsten', - 'createTungstenFabricFirewallPolicy': 'Tungsten', - 'createTungstenFabricFirewallRule': 'Tungsten', - 'createTungstenFabricServiceGroup': 'Tungsten', - 'createTungstenFabricAddressGroup': 'Tungsten', - 'createTungstenFabricLogicalRouter': 'Tungsten', - 'addTungstenFabricNetworkGatewayToLogicalRouter': 'Tungsten', - 'listTungstenFabricApplicationPolicySet': 'Tungsten', - 'listTungstenFabricFirewallPolicy': 'Tungsten', - 'listTungstenFabricFirewallRule': 'Tungsten', - 'listTungstenFabricServiceGroup': 'Tungsten', - 'listTungstenFabricAddressGroup': 'Tungsten', - 'listTungstenFabricLogicalRouter': 'Tungsten', - 'deleteTungstenFabricApplicationPolicySet': 'Tungsten', - 'deleteTungstenFabricFirewallPolicy': 'Tungsten', - 'deleteTungstenFabricFirewallRule': 'Tungsten', - 'deleteTungstenFabricAddressGroup': 'Tungsten', - 
'deleteTungstenFabricServiceGroup': 'Tungsten', - 'deleteTungstenFabricLogicalRouter': 'Tungsten', - 'removeTungstenFabricNetworkGatewayFromLogicalRouter': 'Tungsten', - 'updateTungstenFabricLBHealthMonitor': 'Tungsten', - 'listTungstenFabricLBHealthMonitor': 'Tungsten', + 'TungstenFabric': 'Tungsten', 'listNsxControllers': 'NSX', 'addNsxController': 'NSX', 'deleteNsxController': 'NSX', 'Vpn': 'VPN', - 'Limit': 'Limit', + 'Limit': 'Resource Limit', + 'Netscaler': 'Netscaler', + 'NetscalerControlCenter': 'Netscaler', + 'NetscalerLoadBalancer': 'Netscaler', + 'SolidFire': 'SolidFire', + 'PaloAlto': 'Palo Alto', 'ResourceCount': 'Limit', 'CloudIdentifier': 'Cloud Identifier', 'InstanceGroup': 'VM Group', 'StorageMaintenance': 'Storage Pool', 'StoragePool': 'Storage Pool', 'StorageProvider': 'Storage Pool', + 'StorageScope' : 'Storage Pool', 'updateStorageCapabilities' : 'Storage Pool', 'SecurityGroup': 'Security Group', 'SSH': 'SSH', - 'register': 'Registration', 'AsyncJob': 'Async job', 'Certificate': 'Certificate', - 'Hypervisor': 'Hypervisor', + 'Hypervisor': 'Configuration', 'Alert': 'Alert', 'Event': 'Event', 'login': 'Authentication', @@ -173,19 +139,20 @@ known_categories = { 'ExternalLoadBalancer': 'Ext Load Balancer', 'ExternalFirewall': 'Ext Firewall', 'Usage': 'Usage', - 'TrafficMonitor': 'Usage', - 'TrafficType': 'Usage', + 'TrafficMonitor': 'Network', + 'TrafficType': 'Network', 'Product': 'Product', 'LB': 'Load Balancer', 'ldap': 'LDAP', 'Ldap': 'LDAP', - 'Swift': 'Swift', + 'Swift': 'Image Store', 'S3' : 'S3', - 'SecondaryStorage': 'Host', + 'SecondaryStorage': 'Image Store', 'Project': 'Project', 'Lun': 'Storage', 'Pool': 'Pool', 'VPC': 'VPC', + 'VPCOffering': 'VPC Offering', 'PrivateGateway': 'VPC', 'migrateVpc': 'VPC', 'Simulator': 'simulator', @@ -199,13 +166,15 @@ known_categories = { 'Counter': 'AutoScale', 'Condition': 'AutoScale', 'Api': 'API Discovery', + 'ApiLimit': 'Configuration', 'Region': 'Region', 'Detail': 'Resource metadata', 
'addIpToNic': 'Nic', 'removeIpFromNic': 'Nic', 'updateVmNicIp': 'Nic', 'listNics':'Nic', - 'AffinityGroup': 'Affinity Group', + 'AffinityGroup': 'Affinity Group', + 'ImageStore': 'Image Store', 'addImageStore': 'Image Store', 'listImageStore': 'Image Store', 'deleteImageStore': 'Image Store', @@ -226,15 +195,16 @@ known_categories = { 'CacheStores' : 'Cache Stores', 'CacheStore' : 'Cache Store', 'OvsElement' : 'Ovs Element', - 'StratosphereSsp' : ' Stratosphere SSP', + 'StratosphereSsp' : 'Misc Network Service Providers', 'Metrics' : 'Metrics', + 'listClustersMetrics': 'Cluster', + 'VpnUser': 'VPN', + 'listZonesMetrics': 'Metrics', 'Infrastructure' : 'Metrics', - 'listNetscalerControlCenter' : 'Load Balancer', 'listRegisteredServicePackages': 'Load Balancer', 'listNsVpx' : 'Load Balancer', 'destroyNsVPx': 'Load Balancer', 'deployNetscalerVpx' : 'Load Balancer', - 'deleteNetscalerControlCenter' : 'Load Balancer', 'stopNetScalerVpx' : 'Load Balancer', 'deleteServicePackageOffering' : 'Load Balancer', 'destroyNsVpx' : 'Load Balancer', @@ -254,17 +224,16 @@ known_categories = { 'UnmanagedInstance': 'Virtual Machine', 'KubernetesSupportedVersion': 'Kubernetes Service', 'KubernetesCluster': 'Kubernetes Service', - 'UnmanagedInstance': 'Virtual Machine', 'Rolling': 'Rolling Maintenance', 'importVsphereStoragePolicies' : 'vSphere storage policies', 'listVsphereStoragePolicies' : 'vSphere storage policies', 'ConsoleEndpoint': 'Console Endpoint', - 'Shutdown': 'Shutdown', 'importVm': 'Virtual Machine', + 'revertToVMSnapshot': 'Virtual Machine', 'listQuarantinedIp': 'IP Quarantine', 'updateQuarantinedIp': 'IP Quarantine', 'removeQuarantinedIp': 'IP Quarantine', - 'Shutdown': 'Shutdown', + 'Shutdown': 'Management', 'addObjectStoragePool': 'Object Store', 'listObjectStoragePools': 'Object Store', 'deleteObjectStoragePool': 'Object Store', @@ -274,7 +243,19 @@ known_categories = { 'deleteBucket': 'Object Store', 'listBuckets': 'Object Store', 'listVmsForImport': 'Virtual 
Machine', - 'importVm': 'Virtual Machine' + 'SharedFS': 'Shared FileSystem', + 'SharedFileSystem': 'Shared FileSystem', + 'Webhook': 'Webhook', + 'Webhooks': 'Webhook', + 'purgeExpungedResources': 'Resource', + 'forgotPassword': 'Authentication', + 'resetPassword': 'Authentication', + 'BgpPeer': 'BGP Peer', + 'createASNRange': 'AS Number Range', + 'listASNRange': 'AS Number Range', + 'deleteASNRange': 'AS Number Range', + 'listASNumbers': 'AS Number', + 'releaseASNumber': 'AS Number', } @@ -282,12 +263,19 @@ categories = {} def choose_category(fn): + possible_known_categories = [] for k, v in known_categories.items(): if k in fn: - return v + possible_known_categories.append(k) + + if len(possible_known_categories) > 0: + close_matches = difflib.get_close_matches(fn, possible_known_categories, n=1, cutoff=0.1) + if len(close_matches) > 0: + return known_categories[close_matches[0]] + else: + return known_categories[possible_known_categories[0]] raise Exception('Need to add a category for %s to %s:known_categories' % (fn, __file__)) - sys.exit(1) for f in sys.argv: @@ -338,7 +326,6 @@ def xml_for(command): def write_xml(out, user): with open(out, 'w') as f: cat_strings = [] - for category in categories.keys(): strings = [] for command in categories[category]: diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml index c16795065f0..f2321df6788 100644 --- a/tools/apidoc/pom.xml +++ b/tools/apidoc/pom.xml @@ -51,7 +51,7 @@ org.codehaus.mojo exec-maven-plugin - 1.2.1 + ${cs.exec-maven-plugin.version} compile diff --git a/tools/appliance/README.md b/tools/appliance/README.md index e9293d6373b..bc5b2014a59 100644 --- a/tools/appliance/README.md +++ b/tools/appliance/README.md @@ -25,7 +25,8 @@ CentOS based built-in user VM template. 
# Setting up Tools and Environment -- Install packer and latest KVM, qemu on a Linux machine +- Install packer (v1.8.x, v1.9.x tested) and latest KVM, qemu on a Linux x86 + machine (Ubuntu 20.04 tested) - Install tools for exporting appliances: qemu-img, ovftool, faketime, sharutils - Build and install `vhd-util` as described in build.sh or use pre-built binaries at: @@ -33,10 +34,18 @@ CentOS based built-in user VM template. http://packages.shapeblue.com/systemvmtemplate/vhd-util http://packages.shapeblue.com/systemvmtemplate/libvhd.so.1.0 +- For building ARM64 systemvm template on amd64 systems, please also install: + qemu-utils qemu-system-arm qemu-efi-aarch64 + # How to build appliances Just run build.sh, it will export archived appliances for KVM, XenServer, VMWare and HyperV in `dist` directory: - bash build.sh systemvmtemplate + bash build.sh + bash build.sh systemvmtemplate 4.19.1.0 x86_64 + bash build.sh systemvmtemplate 4.19.1.0 aarch64 + +For building builtin x86_64 template run: + bash build.sh builtin diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh index 1c83f9aba51..fa5d0e853cc 100755 --- a/tools/appliance/build.sh +++ b/tools/appliance/build.sh @@ -27,6 +27,8 @@ Usage: (or use command line arg, default systemvmtemplate) * Set \$version to provide version to apply to built appliance (or use command line arg, default empty) + * Set \$target_arch to provide target architecture + (or use command line arg, default to current architecture. Currently x86_64 and aarch64 are implemented) * Set \$BUILD_NUMBER to provide build number to apply to built appliance (or use command line arg, default empty) * Set \$DEBUG=1 to enable debug logging @@ -85,12 +87,18 @@ if [[ ! 
-z "${JENKINS_HOME}" ]]; then DEBUG=1 fi +# get current system architecture +base_arch=`arch` + # which packer definition to use appliance="${1:-${appliance:-systemvmtemplate}}" # optional version tag to put into the image filename version="${2:-${version:-}}" +# which architecture to build the template for +target_arch="${3:-${target_arch:-${base_arch}}}" + # optional (jenkins) build number tag to put into the image filename BUILD_NUMBER="${4:-${BUILD_NUMBER:-}}" @@ -105,7 +113,7 @@ elif [ ! -z "${BUILD_NUMBER}" ]; then version_tag="-${BUILD_NUMBER}" fi -appliance_build_name=${appliance}${version_tag} +appliance_build_name="${appliance}${version_tag}-${target_arch}" ### ### Generic helper functions @@ -218,7 +226,7 @@ function prepare() { function packer_build() { log INFO "building new image with packer" - cd ${appliance_build_name} && packer build template.json && cd .. + cd ${appliance_build_name} && packer build template-base_${base_arch}-target_${target_arch}.json && cd .. } function stage_vmx() { @@ -227,7 +235,7 @@ function stage_vmx() { displayname = "${1}" annotation = "${1}" guestos = "otherlinux-64" -virtualHW.version = "11" +virtualHW.version = "13" config.version = "8" numvcpus = "1" cpuid.coresPerSocket = "1" @@ -349,10 +357,12 @@ function main() { # process the disk at dist kvm_export - ovm_export - xen_server_export - vmware_export - hyperv_export + if [ "${target_arch}" == "x86_64" ]; then + ovm_export + xen_server_export + vmware_export + hyperv_export + fi rm -f "dist/${appliance}" cd dist && chmod +r * && cd .. cd dist && md5sum * > md5sum.txt && cd .. diff --git a/tools/appliance/systemvmtemplate/http/preseed_aarch64.cfg b/tools/appliance/systemvmtemplate/http/preseed_aarch64.cfg new file mode 100644 index 00000000000..5262603deaa --- /dev/null +++ b/tools/appliance/systemvmtemplate/http/preseed_aarch64.cfg @@ -0,0 +1,122 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +### Localization +# Locale sets language and country. +d-i debian-installer/locale string en_US.UTF-8 +d-i debian-installer/country string IN + +# Keyboard selection. +d-i keymap select us +d-i keyboard-configuration/xkb-keymap select us + +### Network configuration +d-i netcfg/choose_interface select auto +d-i netcfg/get_hostname string systemvm +d-i netcfg/get_domain string apache.org +d-i netcfg/wireless_wep string + +d-i hw-detect/load_firmware boolean true + +### Mirror settings +d-i mirror/country string manual +d-i mirror/http/hostname string deb.debian.org +d-i mirror/http/directory string /debian +d-i mirror/http/proxy string + +### Apt setup +d-i apt-setup/cdrom/set-first false +d-i apt-setup/security-updates boolean true +d-i apt-setup/services-select multiselect security, updates +d-i apt-setup/security_host string security.debian.org +d-i apt-setup/local0/source boolean false +d-i apt-setup/multiarch string i386 +d-i apt-setup/backports boolean true +d-i apt-setup/contrib boolean true +d-i apt-setup/multiverse boolean true +d-i apt-setup/universe boolean true + +### Clock and time zone setup +d-i clock-setup/utc boolean true +d-i time/zone string UTC +d-i clock-setup/ntp boolean true + +### Partitioning +d-i partman-auto/disk string /dev/vda +d-i partman-auto/method string 
regular +d-i partman-auto/expert_recipe string \ + boot-root :: \ + 538 538 1075 free \ + $iflabel{ gpt } \ + $reusemethod{ } \ + method{ efi } \ + format{ } \ + . \ + 400 60 400 ext2 \ + $primary{ } $bootable{ } \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext2 } \ + mountpoint{ /boot } \ + . \ + 256 1000 256 linux-swap \ + method{ swap } format{ } \ + . \ + 2240 40 4000 ext4 \ + method{ format } format{ } \ + use_filesystem{ } filesystem{ ext4 } \ + mountpoint{ / } \ + . + +d-i partman-md/confirm boolean true +d-i partman-partitioning/confirm_write_new_label boolean true +d-i partman/choose_partition select finish +d-i partman/confirm boolean true +d-i partman/confirm_nooverwrite boolean true +grub-efi-arm64 grub2/force_efi_extra_removable boolean true +d-i partman-partitioning/choose_label select gpt +d-i partman-partitioning/default_label string gpt + +### Base system installation +# ... + +### Account setup +d-i passwd/root-login boolean false +d-i passwd/root-password password password +d-i passwd/root-password-again password password +d-i passwd/user-fullname string Cloud User +d-i passwd/username string cloud +d-i passwd/user-password password cloud +d-i passwd/user-password-again password cloud +d-i user-setup/encrypt-home boolean false +d-i user-setup/allow-password-weak boolean true +d-i passwd/user-default-groups string audio cdrom video admin + +### Package selection +tasksel tasksel/first multiselect ssh-server +d-i pkgsel/include string openssh-server ntp acpid sudo bzip2 openssl +# Allowed values: none, safe-upgrade, full-upgrade +d-i pkgsel/upgrade select full-upgrade +d-i pkgsel/update-policy select none + +popularity-contest popularity-contest/participate boolean false + +### Boot loader installation +d-i grub-installer/only_debian boolean true +d-i grub-installer/bootdev string default +d-i finish-install/reboot_in_progress note + +#### Advanced options diff --git a/tools/appliance/systemvmtemplate/http/preseed.cfg 
b/tools/appliance/systemvmtemplate/http/preseed_x86_64.cfg similarity index 100% rename from tools/appliance/systemvmtemplate/http/preseed.cfg rename to tools/appliance/systemvmtemplate/http/preseed_x86_64.cfg diff --git a/tools/appliance/systemvmtemplate/scripts/configure_grub.sh b/tools/appliance/systemvmtemplate/scripts/configure_grub.sh index 231aa764449..f9103925b3b 100644 --- a/tools/appliance/systemvmtemplate/scripts/configure_grub.sh +++ b/tools/appliance/systemvmtemplate/scripts/configure_grub.sh @@ -34,7 +34,7 @@ function configure_grub() { GRUB_DEFAULT=0 GRUB_TIMEOUT=0 GRUB_DISTRIBUTOR=Debian -GRUB_CMDLINE_LINUX_DEFAULT="quiet" +GRUB_CMDLINE_LINUX_DEFAULT="quiet fsck.mode=force fsck.repair=yes" GRUB_CMDLINE_LINUX="console=tty0 console=ttyS0,115200n8 console=hvc0 earlyprintk=xen net.ifnames=0 biosdevname=0 debian-installer=en_US nomodeset" GRUB_CMDLINE_XEN="com1=115200 console=com1" GRUB_TERMINAL="console serial" diff --git a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh index 1a465f4999f..077cabf8d93 100644 --- a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh +++ b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh @@ -41,7 +41,7 @@ function configure_issue() { __?.o/ Apache CloudStack SystemVM $CLOUDSTACK_RELEASE ( )# https://cloudstack.apache.org - (___(_) Debian GNU/Linux 11 \n \l + (___(_) Debian GNU/Linux 12 \n \l EOF } @@ -111,12 +111,14 @@ function configure_services() { systemctl disable haproxy systemctl disable keepalived systemctl disable radvd + systemctl disable frr systemctl disable strongswan-starter systemctl disable x11-common systemctl disable xl2tpd systemctl disable vgauth systemctl disable sshd systemctl disable nfs-common + systemctl disable nfs-server systemctl disable portmap # Disable guest services which will selectively be started based on hypervisor diff --git 
a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 92223cfbd88..d391b5c4e19 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -60,7 +60,7 @@ function install_packages() { sysstat \ apache2 ssl-cert \ dnsmasq dnsmasq-utils \ - nfs-common \ + nfs-common nfs-server xfsprogs \ samba-common cifs-utils \ xl2tpd bcrelay ppp tdb-tools \ xenstore-utils libxenstore4 \ @@ -73,6 +73,7 @@ function install_packages() { haproxy \ haveged \ radvd \ + frr \ sharutils genisoimage \ strongswan libcharon-extra-plugins libstrongswan-extra-plugins strongswan-charon strongswan-starter \ virt-what open-vm-tools qemu-guest-agent hyperv-daemons cloud-guest-utils \ @@ -83,7 +84,7 @@ function install_packages() { apt_clean # 32 bit architecture support for vhd-util - if [ "${arch}" != "i386" ]; then + if [[ "${arch}" != "i386" && "${arch}" == "amd64" ]]; then dpkg --add-architecture i386 apt-get update ${apt_get} install libuuid1:i386 libc6:i386 @@ -92,17 +93,27 @@ function install_packages() { # Install docker and containerd for CKS curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - apt-key fingerprint 0EBFCD88 - add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable" + if [ "${arch}" == "arm64" ]; then + add-apt-repository "deb [arch=arm64] https://download.docker.com/linux/debian $(lsb_release -cs) stable" + elif [ "${arch}" == "amd64" ]; then + add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable" + elif [ "${arch}" == "s390x" ]; then + add-apt-repository "deb [arch=s390x] https://download.docker.com/linux/debian $(lsb_release -cs) stable" + else + add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable" + fi 
apt-get update ${apt_get} install containerd.io apt_clean - install_vhd_util - # Install xenserver guest utilities as debian repos don't have it - wget --no-check-certificate https://download.cloudstack.org/systemvm/debian/xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb - dpkg -i xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb - rm -f xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb + if [ "${arch}" == "amd64" ]; then + install_vhd_util + # Install xenserver guest utilities as debian repos don't have it + wget --no-check-certificate https://download.cloudstack.org/systemvm/debian/xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb + dpkg -i xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb + rm -f xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb + fi } return 2>/dev/null || install_packages diff --git a/tools/appliance/systemvmtemplate/template-base_aarch64-target_aarch64.json b/tools/appliance/systemvmtemplate/template-base_aarch64-target_aarch64.json new file mode 100644 index 00000000000..67493c7c635 --- /dev/null +++ b/tools/appliance/systemvmtemplate/template-base_aarch64-target_aarch64.json @@ -0,0 +1,93 @@ +{ + "_license": "Apache License 2.0", + "builders": [ + { + "accelerator": "kvm", + "boot_command": [ + "c", + "linux /install.a64/vmlinuz ", + "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed_aarch64.cfg ", + "debian-installer=en_US.UTF-8 ", + "auto ", + "language=en locale=en_US.UTF-8 ", + "kbd-chooser/method=us ", + "keyboard-configuration/xkb-keymap=us ", + "netcfg/get_hostname=systemvm ", + "netcfg/get_domain=apache.org ", + "country=IN keymap=us ", + "fb=false ", + "debconf/frontend=noninteractive ", + "console-setup/ask_detect=false ", + "console-keymaps-at/keymap=us ", + "---", + "", + "initrd /install.a64/initrd.gz", + "", + "boot" + ], + "boot_wait": "180s", + "disk_interface": "virtio", + "cdrom_interface": "virtio-scsi", + "disk_size": "5000M", + "format": "qcow2", + "headless": true, + "http_directory": "http", + "iso_checksum": 
"sha512:fc3560bb586af14b1d77ab7c2806616916926afcbd5cb3fd5a04a5633dfd91cfbbccada1a123f1ea14c480153b731cbee72a230cea17fd9116b9df8444d8df1c", + "iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.7.0/arm64/iso-cd/debian-12.7.0-arm64-netinst.iso", + "net_device": "virtio-net", + "output_directory": "../dist", + "qemu_binary": "qemu-system-aarch64", + "qemuargs": [ + [ + "-m", + "500M" + ], + [ + "-machine", + "virt" + ], + [ + "-cpu", + "host" + ], + [ + "-smp", + "1" + ], + [ "-pflash", "/usr/share/AAVMF/AAVMF_CODE.fd" ], + [ "-monitor", "none" ], + [ "-enable-kvm" ], + [ "-boot", "strict=off" ] + ], + "shutdown_command": "sudo halt -p", + "ssh_password": "cloud", + "ssh_timeout": "120m", + "ssh_username": "cloud", + "type": "qemu", + "vm_name": "systemvmtemplate" + } + ], + "description": "CloudStack SystemVM template", + "provisioners": [ + { + "execute_command": "echo 'cloud' | sudo -u root -S bash {{.Path}}", + "scripts": [ + "scripts/apt_upgrade.sh", + "scripts/configure_grub.sh", + "scripts/configure_locale.sh", + "scripts/configure_networking.sh", + "scripts/configure_acpid.sh", + "scripts/install_systemvm_packages.sh", + "scripts/configure_conntrack.sh", + "scripts/authorized_keys.sh", + "scripts/configure_persistent_config.sh", + "scripts/configure_login.sh", + "../cloud_scripts_shar_archive.sh", + "scripts/configure_systemvm_services.sh", + "scripts/cleanup.sh", + "scripts/finalize.sh" + ], + "type": "shell" + } + ] +} diff --git a/tools/appliance/systemvmtemplate/template-base_x86_64-target_aarch64.json b/tools/appliance/systemvmtemplate/template-base_x86_64-target_aarch64.json new file mode 100644 index 00000000000..ed03fd74942 --- /dev/null +++ b/tools/appliance/systemvmtemplate/template-base_x86_64-target_aarch64.json @@ -0,0 +1,91 @@ +{ + "_license": "Apache License 2.0", + "builders": [ + { + "boot_command": [ + "c", + "linux /install.a64/vmlinuz ", + "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed_aarch64.cfg ", + 
"debian-installer=en_US.UTF-8 ", + "auto ", + "language=en locale=en_US.UTF-8 ", + "kbd-chooser/method=us ", + "keyboard-configuration/xkb-keymap=us ", + "netcfg/get_hostname=systemvm ", + "netcfg/get_domain=apache.org ", + "country=IN keymap=us ", + "fb=false ", + "debconf/frontend=noninteractive ", + "console-setup/ask_detect=false ", + "console-keymaps-at/keymap=us ", + "---", + "", + "initrd /install.a64/initrd.gz", + "", + "boot" + ], + "boot_wait": "60s", + "disk_interface": "virtio", + "cdrom_interface": "virtio-scsi", + "disk_size": "5000M", + "format": "qcow2", + "headless": true, + "http_directory": "http", + "iso_checksum": "sha512:fc3560bb586af14b1d77ab7c2806616916926afcbd5cb3fd5a04a5633dfd91cfbbccada1a123f1ea14c480153b731cbee72a230cea17fd9116b9df8444d8df1c", + "iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.7.0/arm64/iso-cd/debian-12.7.0-arm64-netinst.iso", + "net_device": "virtio-net", + "output_directory": "../dist", + "qemu_binary": "qemu-system-aarch64", + "qemuargs": [ + [ + "-m", + "500M" + ], + [ + "-machine", + "virt" + ], + [ + "-cpu", + "cortex-a72" + ], + [ + "-smp", + "1" + ], + [ "-bios", "/usr/share/qemu-efi-aarch64/QEMU_EFI.fd" ], + [ "-monitor", "none" ], + [ "-boot", "strict=off" ] + ], + "shutdown_command": "sudo halt -p", + "ssh_password": "cloud", + "ssh_timeout": "120m", + "ssh_username": "cloud", + "type": "qemu", + "vm_name": "systemvmtemplate" + } + ], + "description": "CloudStack SystemVM template", + "provisioners": [ + { + "execute_command": "echo 'cloud' | sudo -u root -S bash {{.Path}}", + "scripts": [ + "scripts/apt_upgrade.sh", + "scripts/configure_grub.sh", + "scripts/configure_locale.sh", + "scripts/configure_networking.sh", + "scripts/configure_acpid.sh", + "scripts/install_systemvm_packages.sh", + "scripts/configure_conntrack.sh", + "scripts/authorized_keys.sh", + "scripts/configure_persistent_config.sh", + "scripts/configure_login.sh", + "../cloud_scripts_shar_archive.sh", + 
"scripts/configure_systemvm_services.sh", + "scripts/cleanup.sh", + "scripts/finalize.sh" + ], + "type": "shell" + } + ] +} diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template-base_x86_64-target_x86_64.json similarity index 87% rename from tools/appliance/systemvmtemplate/template.json rename to tools/appliance/systemvmtemplate/template-base_x86_64-target_x86_64.json index fe735d26b5e..e209d480334 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template-base_x86_64-target_x86_64.json @@ -6,7 +6,7 @@ "boot_command": [ "", "install ", - "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ", + "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed_x86_64.cfg ", "debian-installer=en_US.UTF-8 ", "auto ", "language=en locale=en_US.UTF-8 ", @@ -27,8 +27,8 @@ "format": "qcow2", "headless": true, "http_directory": "http", - "iso_checksum": "sha512:33c08e56c83d13007e4a5511b9bf2c4926c4aa12fd5dd56d493c0653aecbab380988c5bf1671dbaea75c582827797d98c4a611f7fb2b131fbde2c677d5258ec9", - "iso_url": "https://download.cloudstack.org/systemvm/debian/debian-12.5.0-amd64-netinst.iso", + "iso_checksum": "sha512:e0bd9ba03084a6fd42413b425a2d20e3731678a31fe5fb2cc84f79332129afca2ad4ec897b4224d6a833afaf28a5d938b0fe5d680983182944162c6825b135ce", + "iso_url": "https://cdimage.debian.org/mirror/cdimage/release/12.7.0/amd64/iso-cd/debian-12.7.0-amd64-netinst.iso", "net_device": "virtio-net", "output_directory": "../dist", "qemuargs": [ diff --git a/tools/checkstyle/src/main/resources/cloud-pmd.xml b/tools/checkstyle/src/main/resources/cloud-pmd.xml index ec2914f68f4..66a4ec08294 100644 --- a/tools/checkstyle/src/main/resources/cloud-pmd.xml +++ b/tools/checkstyle/src/main/resources/cloud-pmd.xml @@ -24,13 +24,13 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://pmd.sf.net/ruleset/1.0.0 http://pmd.sf.net/ruleset_xml_schema.xsd" 
xsi:noNamespaceSchemaLocation="http://pmd.sf.net/ruleset_xml_schema.xsd"> - + Ruleset that brings all the rulesets we want from the pmd jar, because the maven-pmd-plugin doesn't find them otherwise. This is also the place to add our own future rulesets, if any. - + diff --git a/tools/checkstyle/src/main/resources/cloud-style.xml b/tools/checkstyle/src/main/resources/cloud-style.xml index ae68a7a5f08..0c111747eaf 100644 --- a/tools/checkstyle/src/main/resources/cloud-style.xml +++ b/tools/checkstyle/src/main/resources/cloud-style.xml @@ -1,12 +1,12 @@ - org.codehaus.mojo exec-maven-plugin - 1.2.1 + ${cs.exec-maven-plugin.version} package diff --git a/tools/devcloud4/prefill.sql b/tools/devcloud4/prefill.sql index 4213cd9d68b..00dc69620f8 100644 --- a/tools/devcloud4/prefill.sql +++ b/tools/devcloud4/prefill.sql @@ -5,9 +5,9 @@ -- to you under the Apache License, Version 2.0 (the -- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at --- +-- -- http://www.apache.org/licenses/LICENSE-2.0 --- +-- -- Unless required by applicable law or agreed to in writing, -- software distributed under the License is distributed on an -- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/tools/docker/Dockerfile b/tools/docker/Dockerfile index 995eca784ce..0494e9ae0da 100644 --- a/tools/docker/Dockerfile +++ b/tools/docker/Dockerfile @@ -19,8 +19,7 @@ FROM ubuntu:22.04 -MAINTAINER "Apache CloudStack" -LABEL Vendor="Apache.org" License="ApacheV2" Version="4.20.0.0-SNAPSHOT" +LABEL Vendor="Apache.org" License="ApacheV2" Version="4.20.0.0-SNAPSHOT" Author="Apache CloudStack " ARG DEBIAN_FRONTEND=noninteractive diff --git a/tools/docker/Dockerfile.marvin b/tools/docker/Dockerfile.marvin index 7ce0b629f96..b227e92e608 100644 --- a/tools/docker/Dockerfile.marvin +++ b/tools/docker/Dockerfile.marvin @@ -19,8 +19,7 @@ # build for cloudstack_home_dir not this folder FROM python:2 -MAINTAINER "Apache CloudStack" 
-LABEL Vendor="Apache.org" License="ApacheV2" Version="4.20.0.0-SNAPSHOT" +LABEL Vendor="Apache.org" License="ApacheV2" Version="4.20.0.0-SNAPSHOT" Author="Apache CloudStack " ENV WORK_DIR=/marvin diff --git a/tools/docker/Dockerfile.smokedev b/tools/docker/Dockerfile.smokedev index 4476f6a3d5f..f929294c2ce 100644 --- a/tools/docker/Dockerfile.smokedev +++ b/tools/docker/Dockerfile.smokedev @@ -19,8 +19,7 @@ FROM ubuntu:16.04 -MAINTAINER "Apache CloudStack" -LABEL Vendor="Apache.org" License="ApacheV2" Version="4.12.0-SNAPSHOT" +LABEL Vendor="Apache.org" License="ApacheV2" Version="4.20.0-SNAPSHOT" Author="Apache CloudStack " RUN apt-get -y update && apt-get install -y \ genisoimage \ diff --git a/tools/marvin/CHANGES.txt b/tools/marvin/CHANGES.txt index 262c8845f72..9dd23677135 100644 --- a/tools/marvin/CHANGES.txt +++ b/tools/marvin/CHANGES.txt @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/tools/marvin/DISCLAIMER.txt b/tools/marvin/DISCLAIMER.txt index fa1e9261a36..d3b58164b7c 100644 --- a/tools/marvin/DISCLAIMER.txt +++ b/tools/marvin/DISCLAIMER.txt @@ -1,7 +1,7 @@ -Apache CloudStack is an effort undergoing incubation at The Apache Software Foundation (ASF), -sponsored by the Apache Incubator. Incubation is required of all newly accepted -projects until a further review indicates that the infrastructure, communications, and -decision making process have stabilized in a manner consistent with other successful ASF -projects. 
While incubation status is not necessarily a reflection of the completeness or -stability of the code, it does indicate that the project has yet to be fully endorsed by +Apache CloudStack is an effort undergoing incubation at The Apache Software Foundation (ASF), +sponsored by the Apache Incubator. Incubation is required of all newly accepted +projects until a further review indicates that the infrastructure, communications, and +decision making process have stabilized in a manner consistent with other successful ASF +projects. While incubation status is not necessarily a reflection of the completeness or +stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF. diff --git a/tools/marvin/marvin/cloudstackException.py b/tools/marvin/marvin/cloudstackException.py index 5a2f72d8c59..cfd11d0678c 100644 --- a/tools/marvin/marvin/cloudstackException.py +++ b/tools/marvin/marvin/cloudstackException.py @@ -77,6 +77,7 @@ class CloudstackAclException(): UNABLE_TO_LIST_NETWORK_ACCOUNT = "Can't create/list resources for account" NO_PERMISSION_TO_ACCESS_ACCOUNT = "does not have permission to access resource Acct" NOT_AVAILABLE_IN_DOMAIN = "not available in domain" + NO_PERMISSION_TO_OPERATE_SOURCE = "does not have permission to operate with provided resource" @staticmethod def verifyMsginException(e,message): @@ -88,7 +89,7 @@ class CloudstackAclException(): @staticmethod def verifyErrorCodeinException(e,errorCode): errorString = " errorCode: " + errorCode - if errorString in str(e): + if errorString in str(e): return True else: return False diff --git a/tools/marvin/marvin/cloudstackTestCase.py b/tools/marvin/marvin/cloudstackTestCase.py index 1164cd9f0f8..297031611e9 100644 --- a/tools/marvin/marvin/cloudstackTestCase.py +++ b/tools/marvin/marvin/cloudstackTestCase.py @@ -183,7 +183,7 @@ class cloudstackTestCase(unittest.case.TestCase): sshClient.execute("service httpd start") time.sleep(5) ssh_response = str(sshClient.execute("service 
httpd status")).lower() - if not "running" in ssh_response: + if "running" not in ssh_response: raise Exception("Failed to start httpd service") self.debug("Setup webserver using apache") diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index ef9bfd774f7..3485eeb8b18 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -177,9 +177,9 @@ test_data = { "service_offering_h2": { "name": "Tagged h2 Small Instance", "displaytext": "Tagged h2 Small Instance", - "cpunumber": 1, - "cpuspeed": 100, - "memory": 256, + "cpunumber": 2, + "cpuspeed": 200, + "memory": 512, "hosttags": "h2" }, "disk_offering": { @@ -450,6 +450,21 @@ test_data = { "UserData": "VirtualRouter" } }, + "shared_network_offering_configdrive": { + "name": "MySharedOfferingWithConfigDrive-shared", + "displaytext": "MySharedOfferingWithConfigDrive", + "guestiptype": "Shared", + "supportedservices": "Dhcp,Dns,UserData", + "specifyVlan": "False", + "specifyIpRanges": "False", + "traffictype": "GUEST", + "tags": "native", + "serviceProviderList": { + "Dhcp": "ConfigDrive", + "Dns": "ConfigDrive", + "UserData": "ConfigDrive" + } + }, "shared_network_offering_all_services": { "name": "shared network offering with services enabled", "displaytext": "Shared network offering", @@ -1034,7 +1049,53 @@ test_data = { "requireshvm": "True", "ispublic": "True", "deployasis": "True" - } + }, + "simulator": { + "name": "tiny-simulator", + "displaytext": "tiny simulator", + "format": "vhd", + "hypervisor": "simulator", + "ostype": "Other Linux (64-bit)", + "url": "http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina.vhd.bz2", + "requireshvm": "True", + "ispublic": "True", + "isextractable": "True" + }, + }, + "test_templates_cloud_init": { + "kvm": { + "name": "ubuntu 22.04 kvm", + "displaytext": "ubuntu 22.04 kvm", + "format": "raw", + "hypervisor": "kvm", + "ostype": "Other Linux (64-bit)", + "url": 
"https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.img", + "requireshvm": "True", + "ispublic": "True", + "isextractable": "False" + }, + "xenserver": { + "name": "ubuntu 22.04 xen", + "displaytext": "ubuntu 22.04 xen", + "format": "vhd", + "hypervisor": "xenserver", + "ostype": "Other Linux (64-bit)", + "url": "https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64-azure.vhd.tar.gz", + "requireshvm": "True", + "ispublic": "True", + "isextractable": "True" + }, + "vmware": { + "name": "ubuntu 22.04 vmware", + "displaytext": "ubuntu 22.04 vmware", + "format": "ova", + "hypervisor": "vmware", + "ostype": "Other Linux (64-bit)", + "url": "https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.ova", + "requireshvm": "True", + "ispublic": "True", + "deployasis": "True" + }, }, "test_ovf_templates": [ { diff --git a/tools/marvin/marvin/dbConnection.py b/tools/marvin/marvin/dbConnection.py index 04140bab3c2..eb48e5852ed 100644 --- a/tools/marvin/marvin/dbConnection.py +++ b/tools/marvin/marvin/dbConnection.py @@ -47,7 +47,7 @@ class DbConnection(object): with contextlib.closing(conn.cursor(buffered=True)) as cursor: cursor.execute(sql, params) try: - if sql.lower().startswith('select') and cursor.rowcount > 0: + if sql.lower().startswith('select') and cursor.rowcount > 0: # we have more than just the row count/success resultRow = cursor.fetchall() except errors.InterfaceError: diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py index 4e23a6a5d45..db88f762788 100644 --- a/tools/marvin/marvin/deployDataCenter.py +++ b/tools/marvin/marvin/deployDataCenter.py @@ -172,7 +172,7 @@ class DeployDataCenters(object): def addBaremetalRct(self, config): networktype= config.zones[0].networktype baremetalrcturl= config.zones[0].baremetalrcturl - if networktype is None or baremetalrcturl is None: + if networktype is None or 
baremetalrcturl is None: return if networktype.lower()=="advanced": diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py index a855908eb0d..557434ea2ee 100755 --- a/tools/marvin/marvin/lib/base.py +++ b/tools/marvin/marvin/lib/base.py @@ -527,7 +527,7 @@ class VirtualMachine: customcpuspeed=None, custommemory=None, rootdisksize=None, rootdiskcontroller=None, vpcid=None, macaddress=None, datadisktemplate_diskoffering_list={}, properties=None, nicnetworklist=None, bootmode=None, boottype=None, dynamicscalingenabled=None, - userdataid=None, userdatadetails=None, extraconfig=None, size=None): + userdataid=None, userdatadetails=None, extraconfig=None, size=None, overridediskofferingid=None): """Create the instance""" cmd = deployVirtualMachine.deployVirtualMachineCmd() @@ -537,6 +537,9 @@ class VirtualMachine: elif "serviceoffering" in services: cmd.serviceofferingid = services["serviceoffering"] + if overridediskofferingid: + cmd.overridediskofferingid = overridediskofferingid + if zoneid: cmd.zoneid = zoneid elif "zoneid" in services: @@ -782,6 +785,8 @@ class VirtualMachine: cmd.virtualmachineid = self.id if templateid: cmd.templateid = templateid + if expunge: + cmd.expunge = expunge if diskofferingid: cmd.diskofferingid = diskofferingid if rootdisksize: @@ -794,7 +799,6 @@ class VirtualMachine: 'key': key, 'value': value }) - return apiclient.restoreVirtualMachine(cmd) def get_ssh_client( @@ -1156,6 +1160,14 @@ class Volume: return Volume(apiclient.createVolume(cmd).__dict__) + def update(self, apiclient, **kwargs): + """Updates the volume""" + + cmd = updateVolume.updateVolumeCmd() + cmd.id = self.id + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return (apiclient.updateVolume(cmd)) + @classmethod def create_custom_disk(cls, apiclient, services, account=None, domainid=None, diskofferingid=None, projectid=None): @@ -2831,6 +2843,10 @@ class NetworkOffering: cmd.tags = services["tags"] if "internetprotocol" in services: 
cmd.internetprotocol = services["internetprotocol"] + if "networkmode" in services: + cmd.networkmode = services["networkmode"] + if "routingmode" in services: + cmd.routingmode = services["routingmode"] cmd.details = [{}] if "servicepackageuuid" in services: cmd.details[0]["servicepackageuuid"] = services["servicepackageuuid"] @@ -3549,7 +3565,7 @@ class Network: subdomainaccess=None, zoneid=None, gateway=None, netmask=None, vpcid=None, aclid=None, vlan=None, externalid=None, bypassvlanoverlapcheck=None, associatednetworkid=None, publicmtu=None, privatemtu=None, - sourcenatipaddress=None): + sourcenatipaddress=None, cidrsize=None, **kwargs): """Create Network for account""" cmd = createNetwork.createNetworkCmd() cmd.name = services["name"] @@ -3576,6 +3592,10 @@ class Network: cmd.netmask = netmask elif "netmask" in services: cmd.netmask = services["netmask"] + if cidrsize: + cmd.cidrsize = cidrsize + elif "cidrsize" in services: + cmd.cidrsize = services["cidrsize"] if "startip" in services: cmd.startip = services["startip"] if "endip" in services: @@ -3633,6 +3653,7 @@ class Network: cmd.privatemtu = privatemtu if sourcenatipaddress: cmd.sourcenatipaddress = sourcenatipaddress + [setattr(cmd, k, v) for k, v in list(kwargs.items())] return Network(apiclient.createNetwork(cmd).__dict__) def delete(self, apiclient): @@ -3686,6 +3707,13 @@ class Network: cmd.listall = True return (apiclient.listNetworks(cmd)) + def changeBgpPeers(self, apiclient, bgppeerids): + cmd = changeBgpPeersForNetwork.changeBgpPeersForNetworkCmd() + cmd.networkid = self.id + if bgppeerids is not None: + cmd.bgppeerids = bgppeerids + return (apiclient.changeBgpPeersForNetwork(cmd)) + class NetworkACL: """Manage Network ACL lifecycle""" @@ -5088,6 +5116,10 @@ class VpcOffering: }) if "internetprotocol" in services: cmd.internetprotocol = services["internetprotocol"] + if "networkmode" in services: + cmd.networkmode = services["networkmode"] + if "routingmode" in services: + cmd.routingmode = 
services["routingmode"] return VpcOffering(apiclient.createVPCOffering(cmd).__dict__) def update(self, apiclient, name=None, displaytext=None, state=None): @@ -5198,6 +5230,13 @@ class VPC: cmd.listall = True return (apiclient.listVPCs(cmd)) + def changeBgpPeers(self, apiclient, bgppeerids): + cmd = changeBgpPeersForVpc.changeBgpPeersForVpcCmd() + cmd.vpcid = self.id + if bgppeerids is not None: + cmd.bgppeerids = bgppeerids + return (apiclient.changeBgpPeersForVpc(cmd)) + class PrivateGateway: """Manage private gateway lifecycle""" @@ -7227,3 +7266,410 @@ class Bucket: cmd.id = self.id [setattr(cmd, k, v) for k, v in list(kwargs.items())] return apiclient.updateBucket(cmd) + +class Webhook: + """Manage Webhook Life cycle""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, name, payloadurl, **kwargs): + """Create Webhook""" + cmd = createWebhook.createWebhookCmd() + cmd.name = name + cmd.payloadurl = payloadurl + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + + return Webhook(apiclient.createWebhook(cmd).__dict__) + + @classmethod + def list(cls, apiclient, **kwargs): + cmd = listWebhooks.listWebhooksCmd() + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + if 'account' in list(kwargs.keys()) and 'domainid' in list(kwargs.keys()): + cmd.listall = True + return apiclient.listWebhooks(cmd) + + def delete(self, apiclient): + """Delete Webhook""" + cmd = deleteWebhook.deleteWebhookCmd() + cmd.id = self.id + apiclient.deleteWebhook(cmd) + + def update(self, apiclient, **kwargs): + """Update Webhook""" + + cmd = updateWebhook.updateWebhookCmd() + cmd.id = self.id + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.updateWebhook(cmd) + + def list_deliveries(self, apiclient, **kwargs): + """List Webhook Deliveries""" + + cmd = listWebhookDeliveries.listWebhookDeliveriesCmd() + cmd.webhookid = self.id + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return 
apiclient.listWebhookDeliveries(cmd) + + def execute_delivery(self, apiclient, **kwargs): + """Execute Webhook Delivery""" + + cmd = executeWebhookDelivery.executeWebhookDeliveryCmd() + cmd.webhookid = self.id + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.executeWebhookDelivery(cmd) + + def delete_deliveries(self, apiclient, **kwargs): + """Delete Webhook Deliveries""" + + cmd = deleteWebhookDelivery.deleteWebhookDeliveryCmd() + cmd.webhookid = self.id + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.deleteWebhookDelivery(cmd) + + +class ZoneIpv4Subnet: + """Manage IPv4 Subnet for Zone""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, zoneid, subnet, **kwargs): + """Create IPv4 Subnet for Zone""" + cmd = createIpv4SubnetForZone.createIpv4SubnetForZoneCmd() + cmd.zoneid = zoneid + cmd.subnet = subnet + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return ZoneIpv4Subnet(apiclient.createIpv4SubnetForZone(cmd).__dict__) + + @classmethod + def list(cls, apiclient, **kwargs): + cmd = listIpv4SubnetsForZone.listIpv4SubnetsForZoneCmd() + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.listIpv4SubnetsForZone(cmd) + + def delete(self, apiclient): + """Delete IPv4 Subnet for Zone""" + cmd = deleteIpv4SubnetForZone.deleteIpv4SubnetForZoneCmd() + cmd.id = self.id + apiclient.deleteIpv4SubnetForZone(cmd) + + def update(self, apiclient, **kwargs): + """Update IPv4 Subnet for Zone""" + + cmd = updateIpv4SubnetForZone.updateIpv4SubnetForZoneCmd() + cmd.id = self.id + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.updateIpv4SubnetForZone(cmd) + + @classmethod + def dedicate(cls, apiclient, id, account=None, domainid=None, projectid=None): + """Dedicate IPv4 Subnet for Zone""" + + cmd = dedicateIpv4SubnetForZone.dedicateIpv4SubnetForZoneCmd() + cmd.id = id + cmd.account = account + cmd.domainid = domainid + 
cmd.projectid = projectid + return ZoneIpv4Subnet(apiclient.dedicateIpv4SubnetForZone(cmd).__dict__) + + def release(self, apiclient): + """Release IPv4 Subnet for Zone""" + + cmd = releaseIpv4SubnetForZone.releaseIpv4SubnetForZoneCmd() + cmd.id = self.id + return apiclient.releaseIpv4SubnetForZone(cmd) + +class Ipv4SubnetForGuestNetwork: + """Manage IPv4 Subnet for Guest Network""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, parentid, subnet=None, cidrsize=None, **kwargs): + """Create IPv4 Subnet for Guest Network""" + cmd = createIpv4SubnetForGuestNetwork.createIpv4SubnetForGuestNetworkCmd() + cmd.parentid = parentid + if subnet: + cmd.subnet = subnet + if cidrsize: + cmd.cidrsize = cidrsize + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return Ipv4SubnetForGuestNetwork(apiclient.createIpv4SubnetForGuestNetwork(cmd).__dict__) + + @classmethod + def list(cls, apiclient, **kwargs): + cmd = listIpv4SubnetsForGuestNetwork.listIpv4SubnetsForGuestNetworkCmd() + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.listIpv4SubnetsForGuestNetwork(cmd) + + def delete(self, apiclient): + """Delete IPv4 Subnet for Guest Network""" + cmd = deleteIpv4SubnetForGuestNetwork.deleteIpv4SubnetForGuestNetworkCmd() + cmd.id = self.id + apiclient.deleteIpv4SubnetForGuestNetwork(cmd) + + + +class RoutingFirewallRule: + """Manage IPv4 Routing Firewall rules""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, services, networkid=None, protocol=None): + """Create IPv4 Routing Firewall rule""" + cmd = createRoutingFirewallRule.createRoutingFirewallRuleCmd() + + if "networkid" in services: + cmd.networkid = services["networkid"] + elif networkid: + cmd.networkid = networkid + + if "protocol" in services: + cmd.protocol = services["protocol"] + if services["protocol"] == 'ICMP': + cmd.icmptype = -1 + cmd.icmpcode = -1 + elif protocol: + 
cmd.protocol = protocol + + if "icmptype" in services: + cmd.icmptype = services["icmptype"] + if "icmpcode" in services: + cmd.icmpcode = services["icmpcode"] + + if "startport" in services: + cmd.startport = services["startport"] + if "endport" in services: + cmd.endport = services["endport"] + + if "cidrlist" in services: + cmd.cidrlist = services["cidrlist"] + if "destcidrlist" in services: + cmd.destcidrlist = services["destcidrlist"] + + if "traffictype" in services: + cmd.traffictype = services["traffictype"] + + return RoutingFirewallRule(apiclient.createRoutingFirewallRule(cmd).__dict__) + + @classmethod + def list(cls, apiclient, **kwargs): + cmd = listRoutingFirewallRules.listRoutingFirewallRulesCmd() + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.listIpv4SubnetsForGuestNetwork(cmd) + + def delete(self, apiclient): + """Delete IPv4 Routing Firewall rule""" + cmd = deleteRoutingFirewallRule.deleteRoutingFirewallRuleCmd() + cmd.id = self.id + apiclient.deleteRoutingFirewallRule(cmd) + + def update(self, apiclient, **kwargs): + """Update IPv4 Routing Firewall rule""" + cmd = updateRoutingFirewallRule.updateRoutingFirewallRuleCmd() + cmd.id = self.id + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + apiclient.updateRoutingFirewallRule(cmd) + + +class ASNRange: + """Manage ASN range for Guest Network""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, zoneid, startasn, endasn, **kwargs): + """Create ASN range for Guest Network""" + cmd = createASNRange.createASNRangeCmd() + cmd.zoneid = zoneid + cmd.startasn = startasn + cmd.endasn = endasn + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return ASNRange(apiclient.createASNRange(cmd).__dict__) + + @classmethod + def list(cls, apiclient, **kwargs): + cmd = listASNRanges.listASNRangesCmd() + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.listASNRanges(cmd) + + def delete(self, 
apiclient): + """Delete ASN range for Guest Network""" + cmd = deleteASNRange.deleteASNRangeCmd() + cmd.id = self.id + apiclient.deleteASNRange(cmd) + + @classmethod + def listAsNumbers(cls, apiclient, **kwargs): + """List AS numbers of an ASN range""" + cmd = listASNumbers.listASNumbersCmd() + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.listASNumbers(cmd) + + +class BgpPeer: + """Manage BGP Peers for Zone""" + + def __init__(self, items): + self.__dict__.update(items) + + @classmethod + def create(cls, apiclient, zoneid, asnumber, **kwargs): + """Create BGP Peer""" + cmd = createBgpPeer.createBgpPeerCmd() + cmd.zoneid = zoneid + cmd.asnumber = asnumber + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return BgpPeer(apiclient.createBgpPeer(cmd).__dict__) + + @classmethod + def list(cls, apiclient, **kwargs): + cmd = listBgpPeers.listBgpPeersCmd() + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.listBgpPeers(cmd) + + def delete(self, apiclient): + """Delete BGP Peer""" + cmd = deleteBgpPeer.deleteBgpPeerCmd() + cmd.id = self.id + apiclient.deleteBgpPeer(cmd) + + def update(self, apiclient, **kwargs): + """Update BGP Peer""" + + cmd = updateBgpPeer.updateBgpPeerCmd() + cmd.id = self.id + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return apiclient.updateBgpPeer(cmd) + + @classmethod + def dedicate(cls, apiclient, id, account=None, domainid=None, projectid=None): + """Dedicate BGP Peer""" + + cmd = dedicateBgpPeer.dedicateBgpPeerCmd() + cmd.id = id + cmd.account = account + cmd.domainid = domainid + cmd.projectid = projectid + return BgpPeer(apiclient.dedicateBgpPeer(cmd).__dict__) + + def release(self, apiclient): + """Release BGP Peer""" + + cmd = releaseBgpPeer.releaseBgpPeerCmd() + cmd.id = self.id + return apiclient.releaseBgpPeer(cmd) + + +class SharedFS: + + def __init__(self, items): + self.__dict__.update(items) + + """Manage Shared FileSystem""" + @classmethod + def create(cls, 
apiclient, services, name, description=None, account=None, domainid=None, projectid=None, + size=None, zoneid=None, diskofferingid=None, serviceofferingid=None, + filesystem=None, provider=None, networkid=None): + """Create Shared FileSystem""" + cmd = createSharedFileSystem.createSharedFileSystemCmd() + cmd.name = name + + if description: + cmd.description = description + if diskofferingid: + cmd.diskofferingid = diskofferingid + elif "diskofferingid" in services: + cmd.diskofferingid = services["diskofferingid"] + + if zoneid: + cmd.zoneid = zoneid + elif "zoneid" in services: + cmd.zoneid = services["zoneid"] + + if account: + cmd.account = account + elif "account" in services: + cmd.account = services["account"] + + if domainid: + cmd.domainid = domainid + elif "domainid" in services: + cmd.domainid = services["domainid"] + + if projectid: + cmd.projectid = projectid + + if size: + cmd.size = size + + if networkid: + cmd.networkid = networkid + elif "networkid" in services: + cmd.networkid = services["networkid"] + + if filesystem: + cmd.filesystem = filesystem + + if provider: + cmd.provider = provider + + if serviceofferingid: + cmd.serviceofferingid = serviceofferingid + elif "serviceofferingid" in services: + cmd.serviceofferingid = services["serviceofferingid"] + + return SharedFS(apiclient.createSharedFileSystem(cmd).__dict__) + + def delete(self, apiclient, expunge=True, forced=True): + """Delete Shared FileSystem""" + cmd = destroySharedFileSystem.destroySharedFileSystemCmd() + cmd.id = self.id + cmd.expunge = expunge + cmd.forced = forced + apiclient.destroySharedFileSystem(cmd) + + def stop(self, apiclient, forced=True): + """Stop Shared FileSystem""" + cmd = stopSharedFileSystem.stopSharedFileSystemCmd() + cmd.id = self.id + cmd.forced = forced + apiclient.stopSharedFileSystem(cmd) + + def start(self, apiclient): + """Start Shared FileSystem""" + cmd = startSharedFileSystem.startSharedFileSystemCmd() + cmd.id = self.id + 
apiclient.startSharedFileSystem(cmd) + + @classmethod + def list(cls, apiclient, **kwargs): + cmd = listSharedFileSystems.listSharedFileSystemCmd() + [setattr(cmd, k, v) for k, v in list(kwargs.items())] + return (apiclient.listSharedFileSystems(cmd)) + + def update(self, apiclient, name=None, description=None): + """Update Shared FileSystem""" + cmd = updateSharedFileSystem.updateSharedFileSystemCmd() + cmd.id = self.id + if name: + cmd.name = name + if description: + cmd.description = description + return (apiclient.updateSharedFileSystem(cmd)) + + def changediskoffering(self, apiclient, diskofferingid=None, size=None): + """Change Disk Offering/Size of the Shared FileSystem""" + cmd = changeSharedFileSystemDiskOffering.changeSharedFileSystemDiskOfferingCmd() + cmd.id = self.id + cmd.diskofferingid = diskofferingid + cmd.size = size + return (apiclient.changeSharedFileSystemDiskOffering(cmd)) diff --git a/tools/marvin/marvin/lib/ncc.py b/tools/marvin/marvin/lib/ncc.py index 3fbffe0ae2c..09ef26ea713 100755 --- a/tools/marvin/marvin/lib/ncc.py +++ b/tools/marvin/marvin/lib/ncc.py @@ -93,7 +93,7 @@ class NCC: elif isolation_policy.lower() == "dedicated": srv_pkg = self.createServicePackageDedicated(name, tnt_group_id, dv_group_id, isolation_policy ) else: - raise Exception("NS device must be either in shared or dedicated mode") + raise Exception("NS device must be either in shared or dedicated mode") if srv_pkg.status_code != 201: raise Exception("Error: %s" % self.__lastError) dev_add_res =self.addDevicetoServicePackage(dv_group_id, device_ip) diff --git a/tools/marvin/marvin/lib/utils.py b/tools/marvin/marvin/lib/utils.py index d3cbd421c2f..f80eccf1159 100644 --- a/tools/marvin/marvin/lib/utils.py +++ b/tools/marvin/marvin/lib/utils.py @@ -43,11 +43,11 @@ from marvin.codes import ( FAILED) def _configure_ssh_credentials(hypervisor): - ssh_command = "ssh -i ~/.ssh/id_rsa.cloud -ostricthostkeychecking=no " + ssh_command = "ssh -q -i ~/.ssh/id_rsa.cloud 
-ostricthostkeychecking=no " if (str(hypervisor).lower() == 'vmware' or str(hypervisor).lower() == 'hyperv'): - ssh_command = "ssh -i ~cloud/.ssh/id_rsa -ostricthostkeychecking=no " + ssh_command = "ssh -q -i ~cloud/.ssh/id_rsa -ostricthostkeychecking=no " return ssh_command diff --git a/tools/marvin/marvin/misc/build/kvm.properties b/tools/marvin/marvin/misc/build/kvm.properties index 1f372c6b424..28178c880cc 100644 --- a/tools/marvin/marvin/misc/build/kvm.properties +++ b/tools/marvin/marvin/misc/build/kvm.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/tools/marvin/marvin/misc/build/xen.properties b/tools/marvin/marvin/misc/build/xen.properties index cffa28c55b9..ca76c38f53c 100644 --- a/tools/marvin/marvin/misc/build/xen.properties +++ b/tools/marvin/marvin/misc/build/xen.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/tools/marvin/marvin/sandbox/advanced/setup.properties b/tools/marvin/marvin/sandbox/advanced/setup.properties index 73eacc938d4..89aa4616aa8 100644 --- a/tools/marvin/marvin/sandbox/advanced/setup.properties +++ b/tools/marvin/marvin/sandbox/advanced/setup.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/tools/marvin/marvin/sandbox/advancedsg/setup.properties b/tools/marvin/marvin/sandbox/advancedsg/setup.properties index ee07ce23938..26f7e478913 100644 --- a/tools/marvin/marvin/sandbox/advancedsg/setup.properties +++ b/tools/marvin/marvin/sandbox/advancedsg/setup.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/tools/marvin/marvin/sandbox/basic/setup.properties b/tools/marvin/marvin/sandbox/basic/setup.properties index 8833b507252..5b1aa90c8ae 100644 --- a/tools/marvin/marvin/sandbox/basic/setup.properties +++ b/tools/marvin/marvin/sandbox/basic/setup.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/tools/marvin/marvin/sandbox/demo/simulator/simulatordemo.properties b/tools/marvin/marvin/sandbox/demo/simulator/simulatordemo.properties index 9d9f14b70a4..353ce044d12 100644 --- a/tools/marvin/marvin/sandbox/demo/simulator/simulatordemo.properties +++ b/tools/marvin/marvin/sandbox/demo/simulator/simulatordemo.properties @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY diff --git a/tools/marvin/mvn-setup.py b/tools/marvin/mvn-setup.py index 61bd2219238..cabcf0dc659 100755 --- a/tools/marvin/mvn-setup.py +++ b/tools/marvin/mvn-setup.py @@ -34,6 +34,8 @@ def replaceVersion(fname, version): with open(fname, 'r') as f: content = f.read() needle = '\nVERSION\s*=\s*[\'"][^\'"]*[\'"]' + # Ensure the version is PEP440 compliant + version = version.replace('-', '+', 1) replacement = '\nVERSION = "%s"' % version content = re.sub(needle, replacement, content, 1) with open(fname, 'w') as f: diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml index df1186d18b7..ea963fc8da7 100644 --- a/tools/marvin/pom.xml +++ b/tools/marvin/pom.xml @@ -59,7 +59,7 @@ org.codehaus.mojo exec-maven-plugin - 1.2.1 + ${cs.exec-maven-plugin.version} generate-sources @@ -113,7 +113,7 @@ org.codehaus.mojo exec-maven-plugin - 1.2.1 + ${cs.exec-maven-plugin.version} generate-sources @@ -177,9 +177,16 @@ - org.codehaus.gmaven - gmaven-plugin - 1.5 + org.codehaus.gmavenplus + gmavenplus-plugin + ${cs.gmavenplus.version} + + + org.codehaus.groovy + groovy-all + ${cs.groovy.version} + + setproperty @@ -188,10 +195,12 @@ execute - - pom.properties['resolved.basedir']=project.basedir.absolutePath.replace('\','/').replace('D:','/cyg/d'); - pom.properties['resolved.userdir']='${user.dir}'.replace('\','/').replace('D:','/cyg/d'); - + + + @@ -199,7 +208,7 @@ org.codehaus.mojo exec-maven-plugin - 1.2.1 + ${cs.exec-maven-plugin.version} pre-integration-test @@ -234,9 +243,16 @@ - org.codehaus.gmaven - gmaven-plugin - 1.5 + org.codehaus.gmavenplus + gmavenplus-plugin + ${cs.gmavenplus.version} + + + org.codehaus.groovy + groovy-all + ${cs.groovy.version} + + setproperty @@ -249,10 +265,12 @@ ${user.dir} 
${marvin.config} - - project.properties['resolved.user.dir']='${user.dir}'.replace('\','/').replace('D:','/cyg/d'); - project.properties['resolved.marvin.config']='${marvin.config}'.replace('\','/').replace('D:','/cyg/d'); - + + + @@ -260,7 +278,7 @@ org.codehaus.mojo exec-maven-plugin - 1.2.1 + ${cs.exec-maven-plugin.version} integration-test diff --git a/tools/transifex/README-transifex.txt b/tools/transifex/README-transifex.txt index 7079331f848..ca02f88eb99 100644 --- a/tools/transifex/README-transifex.txt +++ b/tools/transifex/README-transifex.txt @@ -45,13 +45,13 @@ with the translatons from traductors. ===== The sync-transifex-ui provide too the ability to : -* Download from Transifex the source language resource files. Be carrefully, +* Download from Transifex the source language resource files. Be carrefully, with this, you can remove some transaction on Transifex if some keys has been removed inside the source language resource files. ./sync-transifex-ui.sh download-source-language CloudStack_UI.410_messagesjson -* Upload the L10N resource files on Transifex. +* Upload the L10N resource files on Transifex. ./sync-transifex-ui.sh upload-l10n-languages CloudStack_UI.410_messagesjson diff --git a/tools/whisker/descriptor-for-packaging.xml b/tools/whisker/descriptor-for-packaging.xml index 41423332b7b..1969f68936a 100644 --- a/tools/whisker/descriptor-for-packaging.xml +++ b/tools/whisker/descriptor-for-packaging.xml @@ -865,7 +865,7 @@ EXHIBIT A -Mozilla Public License. The Original Code is ${PROJECT}. The Initial Developer of the Original Code is ${INITIAL_DEVELOPER}. - Portions created by ${INITIAL_DEVELOPER} are Copyright (C) + Portions created by ${INITIAL_DEVELOPER} are Copyright (C) ${INITIAL_DEVELOPER_COPYRIGHT}. All Rights Reserved. Contributor(s): ${CONTRIBUTORS}. @@ -888,9 +888,9 @@ EXHIBIT A -Mozilla Public License. 
- Please note: our license is an adaptation of the MIT X11 License and should be @@ -917,7 +917,7 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - Permission is hereby granted, free of charge, to any person obtaining a copy of @@ -939,9 +939,9 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - Eclipse Distribution License Version 1.0 @@ -973,7 +973,7 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - Permission is hereby granted, free of charge, to any person obtaining @@ -1066,7 +1066,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 3. Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE @@ -1080,7 +1080,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE POSSIBILITY OF SUCH DAMAGE. 
- + + + + @@ -64,11 +68,20 @@ export default { } }, inject: ['parentFetchData'], + computed: { + columns () { + if (this.volumes?.[0]) { + return this.allColumns.filter(col => col.dataIndex in this.volumes[0]) + } + return this.allColumns.filter(col => this.defaultColumns.includes(col.dataIndex)) + } + }, data () { return { vm: {}, volumes: [], - volumeColumns: [ + defaultColumns: ['name', 'state', 'type', 'size'], + allColumns: [ { key: 'name', title: this.$t('label.name'), @@ -87,6 +100,11 @@ export default { key: 'size', title: this.$t('label.size'), dataIndex: 'size' + }, + { + key: 'storage', + title: this.$t('label.storage'), + dataIndex: 'storage' } ] } diff --git a/ui/src/components/view/WebhookDeliveriesTab.vue b/ui/src/components/view/WebhookDeliveriesTab.vue new file mode 100644 index 00000000000..c13b0453254 --- /dev/null +++ b/ui/src/components/view/WebhookDeliveriesTab.vue @@ -0,0 +1,526 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ + + + + + diff --git a/ui/src/components/view/stats/ResourceStatsInfo.vue b/ui/src/components/view/stats/ResourceStatsInfo.vue index 9db3384bc5f..6898141bdfc 100644 --- a/ui/src/components/view/stats/ResourceStatsInfo.vue +++ b/ui/src/components/view/stats/ResourceStatsInfo.vue @@ -45,7 +45,8 @@ export default { { resourceType: 'CHART', messageList: [ - this.$t('message.chart.statistic.info') + this.$t('message.chart.statistic.info'), + this.$t('message.chart.statistic.info.hypervisor.additionals') ] }, { diff --git a/ui/src/components/widgets/Console.vue b/ui/src/components/widgets/Console.vue index ae0a034de02..6c16c7546a7 100644 --- a/ui/src/components/widgets/Console.vue +++ b/ui/src/components/widgets/Console.vue @@ -56,17 +56,10 @@ export default { this.url = (json && json.createconsoleendpointresponse) ? json.createconsoleendpointresponse.consoleendpoint.url : '#/exception/404' if (json.createconsoleendpointresponse.consoleendpoint.success) { if (this.copyUrlToClipboard) { + this.$copyText(this.url) this.$message.success({ content: this.$t('label.copied.clipboard') }) - const hiddenElement = document.createElement('textarea') - hiddenElement.value = this.url - document.body.appendChild(hiddenElement) - hiddenElement.focus() - hiddenElement.select() - - document.execCommand('copy') - document.body.removeChild(hiddenElement) } else { window.open(this.url, '_blank') } diff --git a/ui/src/components/widgets/CopyLabel.vue b/ui/src/components/widgets/CopyLabel.vue index 650f678206e..c5216cc256d 100644 --- a/ui/src/components/widgets/CopyLabel.vue +++ b/ui/src/components/widgets/CopyLabel.vue @@ -18,7 +18,7 @@ @@ -32,6 +32,10 @@ export default { type: String, default: '' }, + copyValue: { + type: String, + default: '' + }, tooltip: { type: String, default: '' @@ -39,6 +43,10 @@ export default { tooltipPlacement: { type: String, default: 'top' + }, + showIcon: { + type: Boolean, + default: false } } } diff --git a/ui/src/components/widgets/ResourceLabel.vue 
b/ui/src/components/widgets/ResourceLabel.vue index 6e44ce54a5d..0a10843ff86 100644 --- a/ui/src/components/widgets/ResourceLabel.vue +++ b/ui/src/components/widgets/ResourceLabel.vue @@ -18,7 +18,13 @@ -
    -

    {{ $t('label.accounttype') }}

    - - {{ $t('label.account') }} - {{ $t('label.project') }} - -
    - -
    -

    *{{ $t('label.domain') }}

    - - - - - - {{ domain.path || domain.name || domain.description }} - - - -
    - - - - +

    {{ $t('label.network') }}

    @@ -146,6 +67,7 @@ diff --git a/ui/src/views/compute/wizard/SecurityGroupSelection.vue b/ui/src/views/compute/wizard/SecurityGroupSelection.vue index 51f96a569ad..87bf62ddac2 100644 --- a/ui/src/views/compute/wizard/SecurityGroupSelection.vue +++ b/ui/src/views/compute/wizard/SecurityGroupSelection.vue @@ -94,7 +94,7 @@ export default { } ], items: [], - selectedRowKeys: [], + selectedRowKeys: this?.preFillContent?.securitygroupids || [], page: 1, pageSize: 10, keyword: null, diff --git a/ui/src/views/compute/wizard/TemplateIsoRadioGroup.vue b/ui/src/views/compute/wizard/TemplateIsoRadioGroup.vue index 00fa2ef9a25..1cd1fa5f291 100644 --- a/ui/src/views/compute/wizard/TemplateIsoRadioGroup.vue +++ b/ui/src/views/compute/wizard/TemplateIsoRadioGroup.vue @@ -46,6 +46,9 @@ :os-name="item.osName" />   {{ item.displaytext }} + + | {{ item.project }} + diff --git a/ui/src/views/dashboard/CapacityDashboard.vue b/ui/src/views/dashboard/CapacityDashboard.vue index 2fc41b6c1be..dae53cf0015 100644 --- a/ui/src/views/dashboard/CapacityDashboard.vue +++ b/ui/src/views/dashboard/CapacityDashboard.vue @@ -184,7 +184,7 @@
    - + - + - + - + - + - + - + - + - +