Compare commits

..

No commits in common. "main" and "4.20.3.0" have entirely different histories.

3521 changed files with 130802 additions and 307990 deletions

View File

@ -50,16 +50,21 @@ github:
rebase: false rebase: false
collaborators: collaborators:
- ingox - acs-robot
- rajujith
- GaOrtiga
- SadiJr
- winterhazel
- gpordeus - gpordeus
- erikbocks - hsato03
- Imvedansh - bernardodemarco
- Damans227 - abh1sar
- FelipeM525
protected_branches: ~ protected_branches: ~
notifications: notifications:
commits: commits@cloudstack.apache.org commits: commits@cloudstack.apache.org
issues: commits@cloudstack.apache.org issues: commits@cloudstack.apache.org
pullrequests: commits@cloudstack.apache.org pullrequests: commits@cloudstack.apache.org
discussions: users@cloudstack.apache.org discussions: users@cloudstack.apache.org

View File

@ -1,20 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
[codespell]
ignore-words = .github/linters/codespell.txt
skip = systemvm/agent/noVNC/*,ui/package.json,ui/package-lock.json,ui/public/js/less.min.js,ui/public/locales/*.json,server/src/test/java/org/apache/cloudstack/network/ssl/CertServiceTest.java,test/integration/smoke/test_ssl_offloading.py

1
.gitattributes vendored
View File

@ -1 +0,0 @@
.github/workflows/*.lock.yml linguist-generated=true merge=ours

25
.github/CODEOWNERS vendored
View File

@ -1,25 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/plugins/storage/volume/linstor @rp-
/plugins/storage/volume/storpool @slavkap
/plugins/storage/volume/ontap @rajiv1 @sandeeplocharla @piyush5 @suryag
.pre-commit-config.yaml @jbampton
/.github/linters/ @jbampton
/plugins/network-elements/nsx/ @Pearl1594 @nvazquez

View File

@ -1,46 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
description: "Thank you for reporting a bug!"
name: bug
title: "[SHORT PROBLEM DESCRIPTION]"
labels: bug, needs-triageing
body:
- type: markdown
attributes:
value: "## Welcome, please describe your problem below;"
- type: textarea
attributes:
label: problem
value: The long description of your problem
- type: markdown
attributes:
value: "## What versions of cloudstack and any infra components are you using"
- type: textarea
attributes:
label: versions
value: The versions of ACS, hypervisors, storage, network etc..
- type: textarea
attributes:
label: The steps to reproduce the bug
value: |
1.
2.
3.
...
- type: textarea
attributes:
label: "What to do about it?"

View File

@ -1,25 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
description: "Thank you for your new feature idea!"
name: feature
title: "[SHORT FUNCTIONAL DESCRIPTION]"
labels: new
body:
- type: textarea
attributes:
label: "The required feature described as a wish"
value: As a User/Admin/Operator I would like to , ... have the system make my morning coffee.

View File

@ -1,5 +0,0 @@
# Mark all cached import files as generated
* linguist-generated=true
# Use 'ours' merge strategy to keep local cached versions
* merge=ours

View File

@ -1,73 +0,0 @@
---
# Report formatting guidelines
---
## Report Structure Guidelines
### 1. Header Levels
**Use h3 (###) or lower for all headers in your issue report to maintain proper document hierarchy.**
When creating GitHub issues or discussions:
- Use `###` (h3) for main sections (e.g., "### Test Summary")
- Use `####` (h4) for subsections (e.g., "#### Device-Specific Results")
- Never use `##` (h2) or `#` (h1) in reports - these are reserved for titles
### 2. Progressive Disclosure
**Wrap detailed test results in `<details><summary><b>Section Name</b></summary>` tags to improve readability and reduce scrolling.**
Use collapsible sections for:
- Verbose details (full test logs, raw data)
- Secondary information (minor warnings, extra context)
- Per-item breakdowns when there are many items
Always keep critical information visible (summary, critical issues, key metrics).
### 3. Report Structure Pattern
1. **Overview**: 1-2 paragraphs summarizing key findings
2. **Critical Information**: Show immediately (summary stats, critical issues)
3. **Details**: Use `<details><summary><b>Section Name</b></summary>` for expanded content
4. **Context**: Add helpful metadata (workflow run, date, trigger)
### Design Principles (Airbnb-Inspired)
Reports should:
- **Build trust through clarity**: Most important info immediately visible
- **Exceed expectations**: Add helpful context like trends, comparisons
- **Create delight**: Use progressive disclosure to reduce overwhelm
- **Maintain consistency**: Follow patterns across all reports
### Example Report Structure
```markdown
### Summary
- Key metric 1: value
- Key metric 2: value
- Status: ✅/⚠️/❌
### Critical Issues
[Always visible - these are important]
<details>
<summary><b>View Detailed Results</b></summary>
[Comprehensive details, logs, traces]
</details>
<details>
<summary><b>View All Warnings</b></summary>
[Minor issues and potential problems]
</details>
### Recommendations
[Actionable next steps - keep visible]
```
## Workflow Run References
- Format run IDs as links: `[§12345](https://github.com/owner/repo/actions/runs/12345)`
- Include up to 3 most relevant run URLs at end under `**References:**`
- Do NOT add footer attribution (system adds automatically)

View File

@ -1,41 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
open-pull-requests-limit: 2
schedule:
interval: "weekly"
groups:
github-actions-dependencies:
patterns:
- "*"
cooldown:
default-days: 7
- package-ecosystem: "maven"
directory: "/"
schedule:
interval: "daily"
cooldown:
default-days: 7

View File

@ -18,21 +18,36 @@
# MD001/heading-increment Heading levels should only increment by one level at a time # MD001/heading-increment Heading levels should only increment by one level at a time
MD001: false MD001: false
# MD003/heading-style Heading style
MD003: false
# MD004/ul-style Unordered list style # MD004/ul-style Unordered list style
MD004: false MD004: false
# MD007/ul-indent Unordered list indentation # MD007/ul-indent Unordered list indentation
MD007: false MD007: false
# MD009/no-trailing-spaces Trailing spaces
MD009: false
# MD010/no-hard-tabs Hard tabs # MD010/no-hard-tabs Hard tabs
MD010: false MD010: false
# MD012/no-multiple-blanks Multiple consecutive blank lines
MD012: false
# MD013/line-length Line length # MD013/line-length Line length
MD013: false MD013: false
# MD014/commands-show-output Dollar signs used before commands without showing output # MD014/commands-show-output Dollar signs used before commands without showing output
MD014: false MD014: false
# MD018/no-missing-space-atx No space after hash on atx style heading
MD018: false
# MD019/no-multiple-space-atx Multiple spaces after hash on atx style heading
MD019: false
# MD022/blanks-around-headings Headings should be surrounded by blank lines # MD022/blanks-around-headings Headings should be surrounded by blank lines
MD022: false MD022: false
@ -83,6 +98,3 @@ MD046: false
# MD052/reference-links-images Reference links and images should use a label that is defined # MD052/reference-links-images Reference links and images should use a label that is defined
MD052: false MD052: false
# MD059/descriptive-link-text Link text should be descriptive
MD059: false

View File

@ -1,32 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
---
extends: default
rules:
line-length:
max: 400 # Very forgiving for GitHub Actions and infrastructure files
indentation: disable # Disable indentation checking for existing files
comments: disable # Disable comment formatting checks
braces: disable
brackets: disable # Disable bracket spacing checks
colons:
max-spaces-after: -1 # Allow any number of spaces after colon
max-spaces-before: 0
document-start: disable # Many files don't have ---
truthy:
allowed-values: ['true', 'false', 'on', 'off', 'yes', 'no']

View File

@ -4,7 +4,6 @@ acount
actuall actuall
acuiring acuiring
acumulate acumulate
addin
addreess addreess
addtion addtion
adminstrator adminstrator
@ -13,8 +12,10 @@ afrer
afterall afterall
againt againt
ags ags
aktive
algoritm algoritm
allo allo
alloacate
allocted allocted
alocation alocation
alogrithm alogrithm
@ -64,7 +65,6 @@ bject
boardcast boardcast
bootstraper bootstraper
bu bu
callin
cant cant
capabilites capabilites
capablity capablity
@ -73,7 +73,6 @@ carrefully
cavaet cavaet
chaing chaing
checkd checkd
checkin
childs childs
choosen choosen
chould chould
@ -94,6 +93,7 @@ confg
configruation configruation
configuable configuable
conneciton conneciton
connexion
constrait constrait
constraits constraits
containg containg
@ -101,7 +101,9 @@ contex
continuesly continuesly
contro contro
controler controler
controles
controll controll
convienient
convinience convinience
coputer coputer
correcponding correcponding
@ -156,13 +158,13 @@ differnet
differnt differnt
direcotry direcotry
directroy directroy
disale
disbale disbale
discrepency discrepency
disover disover
dissapper dissapper
dissassociated dissassociated
divice divice
dockin
doesn' doesn'
doesnot doesnot
doesnt doesnt
@ -173,6 +175,7 @@ eanbled
earch earch
ect ect
elemnt elemnt
eles
elments elments
emmited emmited
enble enble
@ -180,23 +183,29 @@ encryted
enebled enebled
enmpty enmpty
entires entires
enviornment
environmnet environmnet
equivalant equivalant
erro erro
erronous erronous
everthing
everytime everytime
excetion
excption
excute excute
execept execept
execption execption
exects
execut execut
executeable executeable
exeeded exeeded
exisitng exisitng
exisits exisits
existin
existsing existsing
exitting
expcted expcted
expection expection
explaination
explicitely explicitely
faield faield
faild faild
@ -209,6 +218,7 @@ fillled
findout findout
fisrt fisrt
fo fo
folowing
fowarding fowarding
frist frist
fro fro
@ -227,7 +237,6 @@ hanling
happend happend
hasing hasing
hasnt hasnt
havin
hda hda
hostanme hostanme
hould hould
@ -247,14 +256,20 @@ implmeneted
implmentation implmentation
incase incase
includeing includeing
incosistency
indecates indecates
indien
infor infor
informations informations
informaton informaton
infrastrcuture
ingore ingore
inital
initalize initalize
initator initator
initilization
inspite inspite
instace
instal instal
instnace instnace
intefaces intefaces
@ -272,8 +287,12 @@ ist
klunky klunky
lable lable
leve leve
lief
limite limite
linke
listner listner
lokal
lokales
maintainence maintainence
maintenace maintenace
maintenence maintenence
@ -282,6 +301,7 @@ mambers
manaully manaully
manuel manuel
maxium maxium
mehtod
mergable mergable
mesage mesage
messge messge
@ -291,6 +311,7 @@ minumum
mis mis
modifers modifers
mor mor
mot
mulitply mulitply
multipl multipl
multple multple
@ -304,7 +325,7 @@ nin
nodel nodel
nome nome
noone noone
notin nowe
numbe numbe
numer numer
occured occured
@ -357,7 +378,6 @@ propogate
provison provison
psudo psudo
pyhsical pyhsical
re-use
readabilty readabilty
readd readd
reccuring reccuring
@ -372,9 +392,12 @@ remaning
remore remore
remvoing remvoing
renabling renabling
repeatly
reponse reponse
reqest reqest
reqiured reqiured
requieres
requried
reserv reserv
reserverd reserverd
reseted reseted
@ -391,15 +414,17 @@ retriving
retrun retrun
retuned retuned
returing returing
re-use
rever rever
rocessor rocessor
roperty
runing runing
runnign runnign
sate sate
scalled scalled
scipt
scirpt scirpt
scrip scrip
seconadry
seconday seconday
seesion seesion
sepcified sepcified
@ -412,10 +437,12 @@ settig
sevices sevices
shoul shoul
shoule shoule
sie
signle signle
simplier simplier
singature singature
skiping skiping
snaphsot
snpashot snpashot
specied specied
specifed specifed
@ -426,6 +453,7 @@ standy
statics statics
stickyness stickyness
stil stil
stip
storeage storeage
strat strat
streched streched
@ -434,6 +462,7 @@ succesfull
successfull successfull
suceessful suceessful
suces suces
sucessfully
suiteable suiteable
suppots suppots
suppport suppport
@ -466,6 +495,7 @@ uncompressible
uneccessarily uneccessarily
unexepected unexepected
unexpect unexpect
unknow
unkonw unkonw
unkown unkown
unneccessary unneccessary
@ -473,12 +503,14 @@ unparseable
unrecoginized unrecoginized
unsupport unsupport
unxpected unxpected
updat
uptodate uptodate
usera usera
usign usign
usin usin
utlization utlization
vaidate vaidate
valiate
valule valule
valus valus
varibles varibles
@ -487,6 +519,8 @@ verfying
verifing verifing
virutal virutal
visable visable
wakup
wil wil
wit wit
wll
wth wth

View File

@ -30,17 +30,18 @@ jobs:
build: build:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
- name: Set up JDK 17 - name: Set up JDK 11
uses: actions/setup-java@v5 uses: actions/setup-java@v4
with: with:
distribution: 'temurin' java-version: '11'
java-version: '17' distribution: 'adopt'
cache: 'maven' architecture: x64
cache: maven
- name: Set up Python - name: Set up Python
uses: actions/setup-python@v6 uses: actions/setup-python@v5
with: with:
python-version: '3.10' python-version: '3.10'
architecture: 'x64' architecture: 'x64'

View File

@ -89,10 +89,7 @@ jobs:
smoke/test_nested_virtualization smoke/test_nested_virtualization
smoke/test_set_sourcenat smoke/test_set_sourcenat
smoke/test_webhook_lifecycle smoke/test_webhook_lifecycle
smoke/test_purge_expunged_vms smoke/test_purge_expunged_vms",
smoke/test_extension_lifecycle
smoke/test_extension_custom_action_lifecycle
smoke/test_extension_custom",
"smoke/test_network "smoke/test_network
smoke/test_network_acl smoke/test_network_acl
smoke/test_network_ipv6 smoke/test_network_ipv6
@ -140,13 +137,11 @@ jobs:
smoke/test_vm_deployment_planner smoke/test_vm_deployment_planner
smoke/test_vm_strict_host_tags smoke/test_vm_strict_host_tags
smoke/test_vm_schedule smoke/test_vm_schedule
smoke/test_deploy_vgpu_enabled_vm
smoke/test_vm_life_cycle smoke/test_vm_life_cycle
smoke/test_vm_lifecycle_unmanage_import smoke/test_vm_lifecycle_unmanage_import
smoke/test_vm_snapshot_kvm smoke/test_vm_snapshot_kvm
smoke/test_vm_snapshots smoke/test_vm_snapshots
smoke/test_volumes smoke/test_volumes
smoke/test_vpc_conserve_mode
smoke/test_vpc_ipv6 smoke/test_vpc_ipv6
smoke/test_vpc_redundant smoke/test_vpc_redundant
smoke/test_vpc_router_nics smoke/test_vpc_router_nics
@ -169,8 +164,7 @@ jobs:
component/test_cpu_limits component/test_cpu_limits
component/test_cpu_max_limits component/test_cpu_max_limits
component/test_cpu_project_limits component/test_cpu_project_limits
component/test_deploy_vm_userdata_multi_nic component/test_deploy_vm_userdata_multi_nic",
component/test_deploy_vm_lease",
"component/test_egress_fw_rules "component/test_egress_fw_rules
component/test_invalid_gw_nm component/test_invalid_gw_nm
component/test_ip_reservation", component/test_ip_reservation",
@ -217,19 +211,20 @@ jobs:
smoke/test_list_volumes"] smoke/test_list_volumes"]
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up JDK 17 - name: Set up JDK
uses: actions/setup-java@v5 uses: actions/setup-java@v4
with: with:
distribution: 'temurin' java-version: '11'
java-version: '17' distribution: 'adopt'
cache: 'maven' architecture: x64
cache: maven
- name: Set up Python - name: Set up Python
uses: actions/setup-python@v6 uses: actions/setup-python@v5
with: with:
python-version: '3.10' python-version: '3.10'
architecture: 'x64' architecture: 'x64'
@ -341,7 +336,7 @@ jobs:
echo -e "Simulator CI Test Results: (only failures listed)\n" echo -e "Simulator CI Test Results: (only failures listed)\n"
python3 ./tools/marvin/xunit-reader.py integration-test-results/ python3 ./tools/marvin/xunit-reader.py integration-test-results/
- uses: codecov/codecov-action@v6 - uses: codecov/codecov-action@v4
with: with:
files: jacoco-coverage.xml files: jacoco-coverage.xml
fail_ci_if_error: true fail_ci_if_error: true

View File

@ -32,12 +32,12 @@ jobs:
name: codecov name: codecov
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up JDK 17 - name: Set up JDK 17
uses: actions/setup-java@v5 uses: actions/setup-java@v4
with: with:
distribution: 'temurin' distribution: 'temurin'
java-version: '17' java-version: '17'
@ -49,7 +49,7 @@ jobs:
cd nonoss && bash -x install-non-oss.sh && cd .. cd nonoss && bash -x install-non-oss.sh && cd ..
mvn -P quality -Dsimulator -Dnoredist clean install -T$(nproc) mvn -P quality -Dsimulator -Dnoredist clean install -T$(nproc)
- uses: codecov/codecov-action@v6 - uses: codecov/codecov-action@v4
with: with:
files: ./client/target/site/jacoco-aggregate/jacoco.xml files: ./client/target/site/jacoco-aggregate/jacoco.xml
fail_ci_if_error: true fail_ci_if_error: true

View File

@ -1,48 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
name: CodeQL Analysis
on:
push:
branches: [main]
pull_request:
branches: [main]
permissions:
actions: read
contents: read
security-events: write
jobs:
codeql:
name: CodeQL
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: ["actions"]
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Initialize CodeQL
uses: github/codeql-action/init@v4
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v4
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v4
with:
category: "Security"

File diff suppressed because it is too large Load Diff

View File

@ -1,54 +0,0 @@
---
description: |
This workflow creates daily repo status reports. It gathers recent repository
activity (issues, PRs, discussions, releases, code changes) and generates
engaging GitHub issues with productivity insights, community highlights,
and project recommendations.
on:
schedule: daily
workflow_dispatch:
permissions:
contents: read
issues: read
pull-requests: read
network: defaults
tools:
github:
# If in a public repo, setting `lockdown: false` allows
# reading issues, pull requests and comments from 3rd-parties
# If in a private repo this has no particular effect.
lockdown: false
safe-outputs:
create-issue:
title-prefix: "[repo-status] "
labels: [report, daily-status]
source: githubnext/agentics/workflows/daily-repo-status.md@d19056381ba48cb1f7c78510c23069701fa7ae87
---
# Daily Repo Status
Create an upbeat daily status report for the repo as a GitHub issue.
## What to include
- Recent repository activity (issues, PRs, discussions, releases, code changes)
- Progress tracking, goal reminders and highlights
- Project status and recommendations
- Actionable next steps for maintainers
## Style
- Be positive, encouraging, and helpful 🌟
- Use emojis moderately for engagement
- Keep it concise - adjust length based on actual activity
## Process
1. Gather recent activity from the repository
2. Study the repository, its issues and its pull requests
3. Create a new GitHub issue with your findings and insights

View File

@ -38,7 +38,7 @@ jobs:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- name: Login to Docker Registry - name: Login to Docker Registry
uses: docker/login-action@v4 uses: docker/login-action@v2
with: with:
registry: ${{ secrets.DOCKER_REGISTRY }} registry: ${{ secrets.DOCKER_REGISTRY }}
username: ${{ secrets.DOCKERHUB_USER }} username: ${{ secrets.DOCKERHUB_USER }}
@ -47,7 +47,7 @@ jobs:
- name: Set Docker repository name - name: Set Docker repository name
run: echo "DOCKER_REPOSITORY=apache" >> $GITHUB_ENV run: echo "DOCKER_REPOSITORY=apache" >> $GITHUB_ENV
- uses: actions/checkout@v6 - uses: actions/checkout@v4
- name: Set ACS version - name: Set ACS version
run: echo "ACS_VERSION=$(grep '<version>' pom.xml | head -2 | tail -1 | cut -d'>' -f2 |cut -d'<' -f1)" >> $GITHUB_ENV run: echo "ACS_VERSION=$(grep '<version>' pom.xml | head -2 | tail -1 | cut -d'>' -f2 |cut -d'<' -f1)" >> $GITHUB_ENV

File diff suppressed because it is too large Load Diff

View File

@ -1,78 +0,0 @@
---
on:
schedule: 0 14 * * 1-5
workflow_dispatch: null
permissions:
issues: read
imports:
- github/gh-aw/.github/workflows/shared/reporting.md@94662b1dee8ce96c876ba9f33b3ab8be32de82a4
safe-outputs:
add-comment: {}
add-labels:
allowed:
- bug
- feature
- enhancement
- documentation
- question
- help-wanted
- good-first-issue
source: github/gh-aw/.github/workflows/issue-triage-agent.md@94662b1dee8ce96c876ba9f33b3ab8be32de82a4
strict: true
timeout-minutes: 5
tools:
github:
toolsets:
- issues
- labels
---
# Issue Triage Agent
List open issues in ${{ github.repository }} that have no labels. For each unlabeled issue, analyze the title and body, then add one of the allowed labels: `bug`, `feature`, `enhancement`, `documentation`, `question`, `help-wanted`, or `good-first-issue`.
Skip issues that:
- Already have any of these labels
- Have been assigned to any user (especially non-bot users)
After adding the label to an issue, mention the issue author in a comment using this format (follow shared/reporting.md guidelines):
**Comment Template**:
```markdown
### 🏷️ Issue Triaged
Hi @{author}! I've categorized this issue as **{label_name}** based on the following analysis:
**Reasoning**: {brief_explanation_of_why_this_label}
<details>
<summary><b>View Triage Details</b></summary>
#### Analysis
- **Keywords detected**: {list_of_keywords_that_matched}
- **Issue type indicators**: {what_made_this_fit_the_category}
- **Confidence**: {High/Medium/Low}
#### Recommended Next Steps
- {context_specific_suggestion_1}
- {context_specific_suggestion_2}
</details>
**References**: [Triage run §{run_id}](https://github.com/github/gh-aw/actions/runs/{run_id})
```
**Key formatting requirements**:
- Use h3 (###) for the main heading
- Keep reasoning visible for quick understanding
- Wrap detailed analysis in `<details>` tags
- Include workflow run reference
- Keep total comment concise (collapsed details prevent noise)
## Batch Comment Optimization
For efficiency, if multiple issues are triaged in a single run:
1. Add individual labels to each issue
2. Add a brief comment to each issue (using the template above)
3. Optionally: Create a discussion summarizing all triage actions for that run
This provides both per-issue context and batch visibility.

View File

@ -1,16 +0,0 @@
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.

View File

@ -15,7 +15,7 @@
# specific language governing permissions and limitations # specific language governing permissions and limitations
# under the License. # under the License.
name: pre-commit name: Lint
on: [pull_request] on: [pull_request]
@ -32,18 +32,16 @@ jobs:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- name: Check Out - name: Check Out
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Install - name: Install
run: | run: |
python -m pip install --upgrade pip python -m pip install --upgrade pip
pip install pre-commit pip install pre-commit
- name: Set PY - name: Set PY
run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
- uses: actions/cache@v5 - uses: actions/cache@v4
with: with:
path: ~/.cache/pre-commit path: ~/.cache/pre-commit
key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }} key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }}
- name: Run pre-commit - name: Run pre-commit
run: pre-commit run --color=always --all-files run: pre-commit run --all-files
- name: Run manual pre-commit hooks
run: pre-commit run --color=always --all-files --hook-stage manual

View File

@ -32,26 +32,26 @@ jobs:
name: Main Sonar JaCoCo Build name: Main Sonar JaCoCo Build
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up JDK17 - name: Set up JDK17
uses: actions/setup-java@v5 uses: actions/setup-java@v4
with: with:
distribution: 'temurin' distribution: 'temurin'
java-version: '17' java-version: '17'
cache: 'maven' cache: 'maven'
- name: Cache SonarCloud packages - name: Cache SonarCloud packages
uses: actions/cache@v5 uses: actions/cache@v4
with: with:
path: ~/.sonar/cache path: ~/.sonar/cache
key: ${{ runner.os }}-sonar key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar
- name: Cache local Maven repository - name: Cache local Maven repository
uses: actions/cache@v5 uses: actions/cache@v4
with: with:
path: ~/.m2/repository path: ~/.m2/repository
key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml', '*/pom.xml', '*/*/pom.xml', '*/*/*/pom.xml') }} key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml', '*/pom.xml', '*/*/pom.xml', '*/*/*/pom.xml') }}

View File

@ -35,7 +35,7 @@ jobs:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- name: Conflict Check - name: Conflict Check
uses: eps1lon/actions-label-merge-conflict@v3.0.3 uses: eps1lon/actions-label-merge-conflict@v2.0.0
with: with:
repoToken: "${{ secrets.GITHUB_TOKEN }}" repoToken: "${{ secrets.GITHUB_TOKEN }}"
dirtyLabel: "status:has-conflicts" dirtyLabel: "status:has-conflicts"

View File

@ -30,9 +30,9 @@ jobs:
build: build:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
- name: Set up JDK 17 - name: Set up JDK 17
uses: actions/setup-java@v5 uses: actions/setup-java@v4
with: with:
java-version: '17' java-version: '17'
distribution: 'adopt' distribution: 'adopt'

View File

@ -33,27 +33,27 @@ jobs:
name: Sonar JaCoCo Coverage name: Sonar JaCoCo Coverage
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
with: with:
ref: "refs/pull/${{ github.event.number }}/merge" ref: "refs/pull/${{ github.event.number }}/merge"
fetch-depth: 0 fetch-depth: 0
- name: Set up JDK17 - name: Set up JDK17
uses: actions/setup-java@v5 uses: actions/setup-java@v4
with: with:
distribution: 'temurin' distribution: 'temurin'
java-version: '17' java-version: '17'
cache: 'maven' cache: 'maven'
- name: Cache SonarCloud packages - name: Cache SonarCloud packages
uses: actions/cache@v5 uses: actions/cache@v4
with: with:
path: ~/.sonar/cache path: ~/.sonar/cache
key: ${{ runner.os }}-sonar key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar
- name: Cache local Maven repository - name: Cache local Maven repository
uses: actions/cache@v5 uses: actions/cache@v4
with: with:
path: ~/.m2/repository path: ~/.m2/repository
key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml', '*/pom.xml', '*/*/pom.xml', '*/*/*/pom.xml') }} key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml', '*/pom.xml', '*/*/pom.xml', '*/*/*/pom.xml') }}

View File

@ -1,49 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
name: 'Close stale issues and PRs'
on:
schedule:
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
permissions:
actions: write
issues: write
pull-requests: write
steps:
- uses: actions/stale@v10
with:
stale-issue-message: 'This issue is stale because it has been open for 120 days with no activity. It may be removed by administrators of this project at any time. Remove the stale label or comment to request for removal of it to prevent this.'
stale-pr-message: 'This PR is stale because it has been open for 120 days with no activity. It may be removed by administrators of this project at any time. Remove the stale label or comment to request for removal of it to prevent this.'
close-issue-message: 'This issue was closed because it has been stale for 120 days with no activity.'
close-pr-message: 'This PR was closed because it has been stale for 240 days with no activity.'
stale-issue-label: 'no-issue-activity'
stale-pr-label: 'no-pr-activity'
days-before-stale: 120
days-before-close: -1
days-before-pr-close: 240
exempt-issue-labels: 'gsoc,good-first-issue,long-term-plan'
exempt-pr-labels: 'status:ready-for-merge,status:needs-testing,status:on-hold'
- uses: actions/stale@v10
with:
stale-issue-label: 'archive'
days-before-stale: 240
exempt-issue-labels: 'gsoc,good-first-issue,long-term-plan'
days-before-close: -1

View File

@ -31,10 +31,10 @@ jobs:
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
- name: Set up Node - name: Set up Node
uses: actions/setup-node@v6 uses: actions/setup-node@v3
with: with:
node-version: 16 node-version: 16
@ -55,7 +55,7 @@ jobs:
npm run lint npm run lint
npm run test:unit npm run test:unit
- uses: codecov/codecov-action@v6 - uses: codecov/codecov-action@v4
if: github.repository == 'apache/cloudstack' if: github.repository == 'apache/cloudstack'
with: with:
working-directory: ui working-directory: ui

View File

@ -1 +0,0 @@
CHANGES.md

View File

@ -15,122 +15,23 @@
# specific language governing permissions and limitations # specific language governing permissions and limitations
# under the License. # under the License.
--- ---
default_stages: [pre-commit, pre-push] default_stages: [commit, push]
default_language_version: default_language_version:
# force all unspecified Python hooks to run python3 # force all unspecified Python hooks to run python3
python: python3 python: python3
minimum_pre_commit_version: "3.2.0" minimum_pre_commit_version: "2.17.0"
repos: repos:
- repo: meta - repo: meta
hooks: hooks:
- id: identity - id: identity
- id: check-hooks-apply - id: check-hooks-apply
- repo: https://github.com/thlorenz/doctoc.git
rev: v2.2.0
hooks:
- id: doctoc
name: Add TOC for Markdown files
files: ^CONTRIBUTING\.md$|^INSTALL\.md$|^README\.md$
- repo: https://github.com/oxipng/oxipng
rev: v9.1.5
hooks:
- id: oxipng
name: run oxipng
description: optimize PNG images with lossless compression
args: ['-o', '4', '--strip', 'safe', '--alpha']
- repo: https://github.com/gitleaks/gitleaks
rev: v8.27.2
hooks:
- id: gitleaks
name: run gitleaks
description: detect hardcoded secrets
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.5
hooks:
- id: chmod
name: set file permissions
args: ['644']
files: \.md$
stages: [manual]
- id: insert-license
name: add license for all cfg files
description: automatically adds a licence header to all cfg files that don't have a license header
files: \.cfg$
args:
- --comment-style
- '|#|'
- --license-filepath
- .github/workflows/license-templates/LICENSE.txt
- --fuzzy-match-generates-todo
- id: insert-license
name: add license for all Markdown files
files: \.md$
args:
- --comment-style
- '<!--|| -->'
- --license-filepath
- .github/workflows/license-templates/LICENSE.txt
- --fuzzy-match-generates-todo
exclude: ^(CHANGES|ISSUE_TEMPLATE|PULL_REQUEST_TEMPLATE)\.md$|^ui/docs/(full|smoke)-test-plan\.template\.md$|^\.github/workflows/.*\.md$|^\.github/aw/.*\.md$
- id: insert-license
name: add license for all properties files
description: automatically adds a licence header to all properties files that don't have a license header
files: \.properties$
args:
- --comment-style
- '|#|'
- --license-filepath
- .github/workflows/license-templates/LICENSE.txt
- --fuzzy-match-generates-todo
- id: insert-license
name: add license for all Shell files
description: automatically adds a licence header to all Shell files that don't have a license header
files: \.sh$
args:
- --comment-style
- '|#|'
- --license-filepath
- .github/workflows/license-templates/LICENSE.txt
- --fuzzy-match-generates-todo
- id: insert-license
name: add license for all SQL files
files: \.sql$
args:
- --comment-style
- '|--|'
- --license-filepath
- .github/workflows/license-templates/LICENSE.txt
- --fuzzy-match-generates-todo
- id: insert-license
name: add license for all Vue files
files: \.vue$
args:
- --comment-style
- '|//|'
- --license-filepath
- .github/workflows/license-templates/LICENSE.txt
- --fuzzy-match-generates-todo
- id: insert-license
name: add license for all YAML files
description: automatically adds a licence header to all YAML files that don't have a license header
files: \.ya?ml$
args:
- --comment-style
- '|#|'
- --license-filepath
- .github/workflows/license-templates/LICENSE.txt
- --fuzzy-match-generates-todo
exclude: ^\.github/workflows/.*\.lock\.yml$
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0 rev: v4.6.0
hooks: hooks:
#- id: check-added-large-files #- id: check-added-large-files
- id: check-case-conflict - id: check-case-conflict
#- id: check-executables-have-shebangs #- id: check-executables-have-shebangs
- id: check-illegal-windows-names
- id: check-merge-conflict - id: check-merge-conflict
- id: check-shebang-scripts-are-executable
files: \.sh$
- id: check-symlinks - id: check-symlinks
- id: check-vcs-permalinks - id: check-vcs-permalinks
#- id: check-yaml #- id: check-yaml
@ -141,7 +42,6 @@ repos:
exclude: > exclude: >
(?x) (?x)
^scripts/vm/systemvm/id_rsa\.cloud$| ^scripts/vm/systemvm/id_rsa\.cloud$|
^server/src/test/java/org/apache/cloudstack/network/ssl/CertServiceTest\.java$|
^server/src/test/java/com/cloud/keystore/KeystoreTest\.java$| ^server/src/test/java/com/cloud/keystore/KeystoreTest\.java$|
^server/src/test/resources/certs/dsa_self_signed\.key$| ^server/src/test/resources/certs/dsa_self_signed\.key$|
^server/src/test/resources/certs/non_root\.key$| ^server/src/test/resources/certs/non_root\.key$|
@ -151,47 +51,47 @@ repos:
^server/src/test/resources/certs/rsa_self_signed\.key$| ^server/src/test/resources/certs/rsa_self_signed\.key$|
^services/console-proxy/rdpconsole/src/test/doc/rdp-key\.pem$| ^services/console-proxy/rdpconsole/src/test/doc/rdp-key\.pem$|
^systemvm/agent/certs/localhost\.key$| ^systemvm/agent/certs/localhost\.key$|
^systemvm/agent/certs/realhostip\.key$| ^systemvm/agent/certs/realhostip\.key$
^test/integration/smoke/test_ssl_offloading\.py$
- id: end-of-file-fixer - id: end-of-file-fixer
exclude: \.vhd$|\.svg$ exclude: \.vhd$
- id: file-contents-sorter #- id: fix-byte-order-marker
args: [--unique]
files: ^\.github/linters/codespell\.txt$
- id: fix-byte-order-marker
- id: forbid-submodules - id: forbid-submodules
- id: mixed-line-ending - id: mixed-line-ending
exclude: \.(cs|xml)$
- id: trailing-whitespace - id: trailing-whitespace
files: ^(LICENSE|NOTICE)$|README$|\.(bat|cfg|config|cs|css|erb|gitignore|header|in|install|java|md|properties|py|rb|rc|sh|sql|svg|te|template|txt|ucls|vue|xml|xsl|yaml|yml)$|^cloud-cli/bindir/cloud-tool$|^debian/changelog$ files: \.(header|in|java|md|properties|py|rb|sh|sql|txt|vue|xml|yaml|yml)$
args: [--markdown-linebreak-ext=md] args: [--markdown-linebreak-ext=md]
exclude: ^services/console-proxy/rdpconsole/src/test/doc/freerdp-debug-log\.txt$ exclude: ^services/console-proxy/rdpconsole/src/test/doc/freerdp-debug-log\.txt$
- repo: https://github.com/codespell-project/codespell - repo: https://github.com/codespell-project/codespell
rev: v2.4.2 rev: v2.2.6
hooks: hooks:
- id: codespell - id: codespell
name: run codespell name: run codespell
description: Check spelling with codespell description: Check spelling with codespell
args: [--ignore-words=.github/linters/codespell.txt]
exclude: ^ui/package\.json$|^ui/package-lock\.json$|^ui/public/js/less\.min\.js$|^ui/public/locales/.*[^n].*\.json$
- repo: https://github.com/pycqa/flake8 - repo: https://github.com/pycqa/flake8
rev: 7.0.0 rev: 7.0.0
hooks: hooks:
- id: flake8 - id: flake8
args: [--config, .github/linters/.flake8] args: [--config, .github/linters/.flake8]
exclude: >
(?x)
^agent/bindir/cloud-setup-agent\.in$|
^client/bindir/cloud-update-xenserver-licenses\.in$|
^cloud-cli/bindir/cloud-tool$|
^python/bindir/cloud-grab-dependent-library-versions$|
^python/bindir/cloud-setup-baremetal$|
^scripts/vm/hypervisor/xenserver/storagePlugin$|
^scripts/vm/hypervisor/xenserver/vmopspremium$|
^setup/bindir/cloud-setup-encryption\.in$|
^venv/.*$
- repo: https://github.com/igorshubovych/markdownlint-cli - repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.45.0 rev: v0.40.0
hooks: hooks:
- id: markdownlint - id: markdownlint
name: run markdownlint name: run markdownlint
description: check Markdown files with markdownlint description: check Markdown files with markdownlint
args: [--config=.github/linters/.markdown-lint.yml] args: [--config=.github/linters/.markdown-lint.yml]
types: [markdown] types: [markdown]
files: \.md$ files: \.(md|mdown|markdown)$
- repo: https://github.com/adrienverge/yamllint
rev: v1.37.1
hooks:
- id: yamllint
name: run yamllint
description: check YAML files with yamllint
args: [--config-file=.github/linters/.yamllint.yml]
types: [yaml]
files: \.ya?ml$
exclude: ^.*k8s-.*\.ya?ml$|^.github/workflows/.*\.lock\.ya?ml$

View File

@ -242,6 +242,7 @@ Bug ID | Description
[CLOUDSTACK-7722](https://issues.apache.org/jira/browse/CLOUDSTACK-7722) | add.label: Add button for tags show the label not "Add" text... [CLOUDSTACK-7722](https://issues.apache.org/jira/browse/CLOUDSTACK-7722) | add.label: Add button for tags show the label not "Add" text...
[CLOUDSTACK-7246](https://issues.apache.org/jira/browse/CLOUDSTACK-7246) | VM deployment failed due to wrong in script name createipalias.sh... [CLOUDSTACK-7246](https://issues.apache.org/jira/browse/CLOUDSTACK-7246) | VM deployment failed due to wrong in script name createipalias.sh...
Version 4.4.1 Version 4.4.1
------------- -------------
@ -275,6 +276,7 @@ Bug ID | Description
[CLOUDSTACK-1632](https://issues.apache.org/jira/browse/CLOUDSTACK-1632) | Mistakes in authorizeSecurityGroup* API docs... [CLOUDSTACK-1632](https://issues.apache.org/jira/browse/CLOUDSTACK-1632) | Mistakes in authorizeSecurityGroup* API docs...
[CLOUDSTACK-401](https://issues.apache.org/jira/browse/CLOUDSTACK-401) | Storage options missing from table... [CLOUDSTACK-401](https://issues.apache.org/jira/browse/CLOUDSTACK-401) | Storage options missing from table...
Version 4.4.0 Version 4.4.0
------------- -------------
@ -644,12 +646,12 @@ Bug ID | Description
Version 4.2.1 Version 4.2.1
------------- -------------
Release notes contain the list of [bug fixes](https://cloudstack.apache.org/docs/en-US/Apache_CloudStack/4.2.1/html/Release_Notes/version-4.2.html#issues-fixed-4.2.1) Release notes contain the list of [bug fixes](http://cloudstack.apache.org/docs/en-US/Apache_CloudStack/4.2.1/html/Release_Notes/version-4.2.html#issues-fixed-4.2.1)
Version 4.2.0 Version 4.2.0
------------- -------------
Released on October 1 2013. Released on October 1 2013.
Release notes contain the list of [bug fixes](https://cloudstack.apache.org/docs/en-US/Apache_CloudStack/4.2.0/html/Release_Notes/index.html) Release notes contain the list of [bug fixes](http://cloudstack.apache.org/docs/en-US/Apache_CloudStack/4.2.0/html/Release_Notes/index.html)
Version 4.1.0 Version 4.1.0
------------- -------------
@ -928,6 +930,7 @@ Security Fixes:
* CVE-2012-4501: Apache CloudStack configuration vulnerability * CVE-2012-4501: Apache CloudStack configuration vulnerability
Version 4.0.2 Version 4.0.2
------------------------ ------------------------
@ -976,6 +979,7 @@ Issues fixed in this release:
* CLOUDSTACK-2090: Upgrade from version 4.0.1 to version 4.0.2 triggers the 4.0.0 to 4.0.1. * CLOUDSTACK-2090: Upgrade from version 4.0.1 to version 4.0.2 triggers the 4.0.0 to 4.0.1.
* CLOUDSTACK-2091: Error in API documentation for 4.0.x. * CLOUDSTACK-2091: Error in API documentation for 4.0.x.
Version 4.0.1-incubating Version 4.0.1-incubating
------------------------ ------------------------
@ -1019,6 +1023,7 @@ Bugs fixed in this release:
* CLOUDSTACK-961: Installation docs don't detail dependencies for building RPMs * CLOUDSTACK-961: Installation docs don't detail dependencies for building RPMs
* CLOUDSTACK-995: Not able to add the KVM host * CLOUDSTACK-995: Not able to add the KVM host
Version 4.0.0-incubating Version 4.0.0-incubating
------------------------ ------------------------
@ -1051,6 +1056,7 @@ Security Fixes:
* CVE-2012-4501: Apache CloudStack configuration vulnerability * CVE-2012-4501: Apache CloudStack configuration vulnerability
Updating this file Updating this file
------------------ ------------------

View File

@ -1,79 +1,46 @@
<!-- Contributing to Apache CloudStack (ACS)
Licensed to the Apache Software Foundation (ASF) under one =======================================
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Contributing to Apache CloudStack (ACS)
## Summary
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Summary](#summary)
- [Bug fixes](#bug-fixes)
- [Developing new features](#developing-new-features)
- [PendingReleaseNotes file](#pendingreleasenotes-file)
- [Fork the code](#fork-the-code)
- [Making changes](#making-changes)
- [Rebase `feature_x` to include updates from `upstream/main`](#rebase-feature_x-to-include-updates-from-upstreammain)
- [Make a GitHub Pull Request to contribute your changes](#make-a-github-pull-request-to-contribute-your-changes)
- [Cleaning up after a successful pull request](#cleaning-up-after-a-successful-pull-request)
- [Release Principles](#release-principles)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Summary
Summary
-------
This document covers how to contribute to the ACS project. ACS uses GitHub PRs to manage code contributions. This document covers how to contribute to the ACS project. ACS uses GitHub PRs to manage code contributions.
These instructions assume you have a GitHub.com account, so if you don't have one you will have to create one. Your proposed code changes will be published to your own fork of the ACS project, and you will submit a Pull Request for your changes to be added. These instructions assume you have a GitHub.com account, so if you don't have one you will have to create one. Your proposed code changes will be published to your own fork of the ACS project and you will submit a Pull Request for your changes to be added.
_Let's get started!!!_ _Lets get started!!!_
## Bug fixes Bug fixes
---------
It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches. It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches.
Therefore, a pull request (PR) that fixes a bug, should be sent against a release branch. Therefore, a pull request (PR) that fixes a bug, should be sent against a release branch.
This can be either the "current release" or the "previous release", depending on which ones are maintained. This can be either the "current release" or the "previous release", depending on which ones are maintained.
Since the goal is a stable main, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> main (in other words: old to new) Since the goal is a stable main, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> main (in other words: old to new)
## Developing new features Developing new features
-----------------------
Development should be done in a feature branch, branched off of main. Development should be done in a feature branch, branched off of main.
Send a PR(steps below) to get it into main (2x LGTM applies). Send a PR(steps below) to get it into main (2x LGTM applies).
PR will only be merged when main is open, will be held otherwise until main is open again. PR will only be merged when main is open, will be held otherwise until main is open again.
No back porting / cherry-picking features to existing branches! No back porting / cherry-picking features to existing branches!
## PendingReleaseNotes file PendingReleaseNotes file
------------------------
When developing a new feature or making a (major) change to an existing feature you are encouraged to append this to the PendingReleaseNotes file so that the Release Manager can When developing a new feature or making a (major) change to a existing feature you are encouraged to append this to the PendingReleaseNotes file so that the Release Manager can
use this file as a source of information when compiling the Release Notes for a new release. use this file as a source of information when compiling the Release Notes for a new release.
When adding information to the PendingReleaseNotes file make sure that you write a good and understandable description of the new feature or change which you have developed. When adding information to the PendingReleaseNotes file make sure that you write a good and understandable description of the new feature or change which you have developed.
Updating the PendingReleaseNotes file is preferably a part of the original Pull Request, but that is up to the developers' discretion. Updating the PendingReleaseNotes file is preferably a part of the original Pull Request, but that is up to the developers' discretion.
## Fork the code Fork the code
-------------
In your browser, navigate to: [https://github.com/apache/cloudstack](https://github.com/apache/cloudstack) In your browser, navigate to: [https://github.com/apache/cloudstack](https://github.com/apache/cloudstack)
Fork the repository by clicking on the 'Fork' button on the top right hand side. The fork will happen, and you will be taken to your own fork of the repository. Copy the Git repository URL by clicking on the clipboard next to the URL on the right hand side of the page under '**HTTPS** clone URL'. You will paste this URL when doing the following `git clone` command. Fork the repository by clicking on the 'Fork' button on the top right hand side. The fork will happen and you will be taken to your own fork of the repository. Copy the Git repository URL by clicking on the clipboard next to the URL on the right hand side of the page under '**HTTPS** clone URL'. You will paste this URL when doing the following `git clone` command.
On your computer, follow these steps to set up a local repository for working on ACS: On your computer, follow these steps to setup a local repository for working on ACS:
```bash ```bash
$ git clone https://github.com/YOUR_ACCOUNT/cloudstack.git $ git clone https://github.com/YOUR_ACCOUNT/cloudstack.git
@ -84,7 +51,10 @@ $ git fetch upstream
$ git rebase upstream/main $ git rebase upstream/main
``` ```
## Making changes
Making changes
--------------
It is important that you create a new branch to make changes on and that you do not change the `main` branch (other than to rebase in changes from `upstream/main`). In this example I will assume you will be making your changes to a branch called `feature_x`. This `feature_x` branch will be created on your local repository and will be pushed to your forked repository on GitHub. Once this branch is on your fork you will create a Pull Request for the changes to be added to the ACS project. It is important that you create a new branch to make changes on and that you do not change the `main` branch (other than to rebase in changes from `upstream/main`). In this example I will assume you will be making your changes to a branch called `feature_x`. This `feature_x` branch will be created on your local repository and will be pushed to your forked repository on GitHub. Once this branch is on your fork you will create a Pull Request for the changes to be added to the ACS project.
@ -100,7 +70,9 @@ $ git commit -a -m "descriptive commit message for your changes"
> The `-b` specifies that you want to create a new branch called `feature_x`. You only specify `-b` the first time you checkout because you are creating a new branch. Once the `feature_x` branch exists, you can later switch to it with only `git checkout feature_x`. > The `-b` specifies that you want to create a new branch called `feature_x`. You only specify `-b` the first time you checkout because you are creating a new branch. Once the `feature_x` branch exists, you can later switch to it with only `git checkout feature_x`.
## Rebase `feature_x` to include updates from `upstream/main`
Rebase `feature_x` to include updates from `upstream/main`
------------------------------------------------------------
It is important that you maintain an up-to-date `main` branch in your local repository. This is done by rebasing in the code changes from `upstream/main` (the official ACS project repository) into your local repository. You will want to do this before you start working on a feature as well as right before you submit your changes as a pull request. I recommend you do this process periodically while you work to make sure you are working off the most recent project code. It is important that you maintain an up-to-date `main` branch in your local repository. This is done by rebasing in the code changes from `upstream/main` (the official ACS project repository) into your local repository. You will want to do this before you start working on a feature as well as right before you submit your changes as a pull request. I recommend you do this process periodically while you work to make sure you are working off the most recent project code.
@ -120,11 +92,13 @@ $ git rebase main
> Now your `feature_x` branch is up-to-date with all the code in `upstream/main`. > Now your `feature_x` branch is up-to-date with all the code in `upstream/main`.
## Make a GitHub Pull Request to contribute your changes
When you are happy with your changes, and you are ready to contribute them, you will create a Pull Request on GitHub to do so. This is done by pushing your local changes to your forked repository (default remote name is `origin`) and then initiating a pull request on GitHub. Make a GitHub Pull Request to contribute your changes
-----------------------------------------------------
Please include JIRA id, detailed information about the bug/feature, what all tests are executed, how the reviewer can test this feature etc. In case of UI PRs, a screenshot is preferred. When you are happy with your changes and you are ready to contribute them, you will create a Pull Request on GitHub to do so. This is done by pushing your local changes to your forked repository (default remote name is `origin`) and then initiating a pull request on GitHub.
Please include JIRA id, detailed information about the bug/feature, what all tests are executed, how the reviewer can test this feature etc. Incase of UI PRs, a screenshot is preferred.
> **IMPORTANT:** Make sure you have rebased your `feature_x` branch to include the latest code from `upstream/main` _before_ you do this. > **IMPORTANT:** Make sure you have rebased your `feature_x` branch to include the latest code from `upstream/main` _before_ you do this.
@ -144,7 +118,9 @@ To initiate the pull request, do the following:
If you are requested to make modifications to your proposed changes, make the changes locally on your `feature_x` branch, re-push the `feature_x` branch to your fork. The existing pull request should automatically pick up the change and update accordingly. If you are requested to make modifications to your proposed changes, make the changes locally on your `feature_x` branch, re-push the `feature_x` branch to your fork. The existing pull request should automatically pick up the change and update accordingly.
## Cleaning up after a successful pull request
Cleaning up after a successful pull request
-------------------------------------------
Once the `feature_x` branch has been committed into the `upstream/main` branch, your local `feature_x` branch and the `origin/feature_x` branch are no longer needed. If you want to make additional changes, restart the process with a new branch. Once the `feature_x` branch has been committed into the `upstream/main` branch, your local `feature_x` branch and the `origin/feature_x` branch are no longer needed. If you want to make additional changes, restart the process with a new branch.
@ -158,6 +134,6 @@ $ git branch -D feature_x
$ git push origin :feature_x $ git push origin :feature_x
``` ```
## Release Principles Release Principles
------------------
Detailed information about ACS release principles is available at https://cwiki.apache.org/confluence/display/CLOUDSTACK/Release+principles+for+Apache+CloudStack+4.6+and+up Detailed information about ACS release principles is available at https://cwiki.apache.org/confluence/display/CLOUDSTACK/Release+principles+for+Apache+CloudStack+4.6+and+up

View File

@ -1,46 +1,15 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Apache CloudStack Installation basics # Apache CloudStack Installation basics
This document describes how to develop, build, package and install Apache This document describes how to develop, build, package and install Apache
CloudStack. For more information please refer to the official [documentation](https://docs.cloudstack.apache.org) CloudStack. For more information please refer to the official [documentation](http://docs.cloudstack.apache.org)
or the developer [wiki](https://cwiki.apache.org/confluence/display/CLOUDSTACK/Home). or the developer [wiki](https://cwiki.apache.org/confluence/display/CLOUDSTACK/Home).
Apache CloudStack developers use various platforms for development, this guide Apache CloudStack developers use various platforms for development, this guide
was tested against a CentOS 7 x86_64 setup. was tested against a CentOS 7 x86_64 setup.
<!-- START doctoc generated TOC please keep comment here to allow auto update --> * [Setting up development environment](https://cwiki.apache.org/confluence/display/CLOUDSTACK/Setting+up+CloudStack+Development+Environment) for Apache CloudStack.
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> * [Building](https://cwiki.apache.org/confluence/display/CLOUDSTACK/How+to+build+CloudStack) Apache CloudStack.
* [Appliance based development](https://github.com/rhtyd/monkeybox)
- [Setting up Development Environment](#setting-up-development-environment)
- [Using jenv and/or pyenv for Version Management](#using-jenv-andor-pyenv-for-version-management)
- [Getting the Source Code](#getting-the-source-code)
- [Building](#building)
- [To bring up CloudStack UI](#to-bring-up-cloudstack-ui)
- [Building with non-redistributable plugins](#building-with-non-redistributable-plugins)
- [Packaging and Installation](#packaging-and-installation)
- [Debian/Ubuntu](#debianubuntu)
- [RHEL/CentOS](#rhelcentos)
- [Notes](#notes)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Setting up Development Environment ## Setting up Development Environment
@ -49,30 +18,29 @@ Install tools and dependencies used for development:
# yum -y install git java-17-openjdk java-17-openjdk-devel \ # yum -y install git java-17-openjdk java-17-openjdk-devel \
mysql mysql-server mkisofs git gcc python MySQL-python openssh-clients wget mysql mysql-server mkisofs git gcc python MySQL-python openssh-clients wget
Set up Maven (3.9.10): Set up Maven (3.6.0):
# wget https://dlcdn.apache.org/maven/maven-3/3.9.10/binaries/apache-maven-3.9.10-bin.tar.gz # wget http://www.us.apache.org/dist/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz
# sudo tar -zxvf apache-maven-3.9.10-bin.tar.gz -C /usr/local # tar -zxvf apache-maven-3.6.3-bin.tar.gz -C /usr/local
# cd /usr/local # cd /usr/local
# sudo ln -s apache-maven-3.9.10 maven # ln -s apache-maven-3.6.3 maven
# echo export M2_HOME=/usr/local/maven >> ~/.bashrc # or .zshrc or .profile # echo export M2_HOME=/usr/local/maven >> ~/.bashrc # or .zshrc or .profile
# echo export PATH=/usr/local/maven/bin:${PATH} >> ~/.bashrc # or .zshrc or .profile # echo export PATH=/usr/local/maven/bin:${PATH} >> ~/.bashrc # or .zshrc or .profile
# source ~/.bashrc # source ~/.bashrc
Setup up Node.js 16: Setup up NodeJS (LTS):
# curl -sL https://rpm.nodesource.com/setup_16.x | sudo -E bash - # curl -sL https://rpm.nodesource.com/setup_12.x | sudo bash -
# sudo yum install nodejs # sudo yum install nodejs
# sudo npm install -g @vue/cli npm-check-updates # sudo npm install -g @vue/cli npm-check-updates
Start the MySQL service: Start the MySQL service:
$ service mysqld start $ service mysqld start
$ mysql_secure_installation
### Using jenv and/or pyenv for Version Management ### Using jenv and/or pyenv for Version Management
CloudStack is built using Java and Python. To make selection of these tools versions more consistent and ease installation for developers, optional support for [jenv](http://www.jenv.be/) and [pyenv](https://github.com/yyuu/pyenv) with [virtualenv]|(https://github.com/yyuu/pyenv-virtualenv) is provided. jenv installation instructions are available here and pyenv installation instructions are available here. For users of [oh-my-zsh](https://ohmyz.sh/) there is a pyenv plugin available to trigger configuration of pyenv in a shell session. CloudStack is built using Java and Python. To make selection of these tools versions more consistent and ease installation for developers, optional support for [jenv](http://www.jenv.be/) and [pyenv](https://github.com/yyuu/pyenv) with [virtualenv]|(https://github.com/yyuu/pyenv-virtualenv) is provided. jenv installation instructions are available here and pyenv installation instructions are available here. For users of [oh-my-zsh](http://ohmyz.sh/) there is a pyenv plugin available to trigger configuration of pyenv in a shell session.
Following installation, execute the following commands to configure jenv and pyenv for use with CloudStack development: Following installation, execute the following commands to configure jenv and pyenv for use with CloudStack development:
@ -118,33 +86,13 @@ Start the management server:
If this works, you've successfully setup a single server Apache CloudStack installation. If this works, you've successfully setup a single server Apache CloudStack installation.
To access the Management Server UI, follow the following procedure: Open the following URL on your browser to access the Management Server UI:
http://localhost:8080/client/
The default credentials are; user: admin, password: password and the domain The default credentials are; user: admin, password: password and the domain
field should be left blank which is defaulted to the ROOT domain. field should be left blank which is defaulted to the ROOT domain.
## To bring up CloudStack UI
Move to UI Directory
$ cd /path/to/cloudstack/ui
To install dependencies.
$ npm install
To build the project.
$ npm run build
For Development Mode.
$ npm start
Make sure to set `CS_URL=http://localhost:8080` on the `.env.local` file on UI.
You should be able to run the management server on http://localhost:5050
## Building with non-redistributable plugins ## Building with non-redistributable plugins
CloudStack supports several plugins that depend on libraries with distribution restrictions. CloudStack supports several plugins that depend on libraries with distribution restrictions.
@ -202,7 +150,7 @@ All the rpm packages will be created in `dist/rpmbuild/RPMS/x86_64` directory.
## Notes ## Notes
If you will be using Xen as your hypervisor, please download [vhd-util](https://download.cloudstack.org/tools/vhd-util) If you will be using Xen as your hypervisor, please download [vhd-util](http://download.cloudstack.org/tools/vhd-util)
If management server is installed on RHEL/CentOS, then copy vhd-util into: If management server is installed on RHEL/CentOS, then copy vhd-util into:

View File

@ -35,14 +35,17 @@ New line separated list of affected versions, commit ID for issues on main branc
Information about the configuration if relevant, e.g. basic network, advanced networking, etc. N/A otherwise Information about the configuration if relevant, e.g. basic network, advanced networking, etc. N/A otherwise
--> -->
##### OS / ENVIRONMENT ##### OS / ENVIRONMENT
<!-- <!--
Information about the environment if relevant, N/A otherwise Information about the environment if relevant, N/A otherwise
--> -->
##### SUMMARY ##### SUMMARY
<!-- Explain the problem/feature briefly --> <!-- Explain the problem/feature briefly -->
##### STEPS TO REPRODUCE ##### STEPS TO REPRODUCE
<!-- <!--
For bugs, show exactly how to reproduce the problem, using a minimal test-case. Use Screenshots if accurate. For bugs, show exactly how to reproduce the problem, using a minimal test-case. Use Screenshots if accurate.

View File

@ -1,62 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# pre-commit
We run [pre-commit](https://pre-commit.com/) with
[GitHub Actions](https://github.com/apache/cloudstack/blob/main/.github/workflows/pre-commit.yml) so installation on your
local machine is currently optional.
The `pre-commit` [configuration file](https://github.com/apache/cloudstack/blob/main/.pre-commit-config.yaml)
is in the repository root. Before you can run the hooks, you need to have `pre-commit` installed. `pre-commit` is a
[Python package](https://pypi.org/project/pre-commit/).
From the repository root run: `pip install -r requirements-dev.txt` to install `pre-commit` and after you install
`pre-commit` you will then need to install the pre-commit hooks by running `pre-commit install`.
The hooks run when running `git commit` and also from the command line with `pre-commit`. Some of the hooks will auto
fix the code after the hooks fail whilst most will print error messages from the linters. If a hook fails the overall
commit will fail, and you will need to fix the issues or problems and `git add` and `git commit` again. On `git commit`
the hooks will run mostly only against modified files so if you want to test all hooks against all files and when you
are adding a new hook you should always run:
`pre-commit run --all-files`
Sometimes you might need to skip a hook to commit because the hook is stopping you from committing or your computer
might not have all the installation requirements for all the hooks. The `SKIP` variable is comma separated for two or
more hooks:
`SKIP=codespell git commit -m "foo"`
The same applies when running pre-commit:
`SKIP=codespell pre-commit run --all-files`
Occasionally you can have more serious problems when using `pre-commit` with `git commit`. You can use `--no-verify` to
commit and stop `pre-commit` from checking the hooks. For example:
`git commit --no-verify -m "foo"`
If you are having major problems using `pre-commit` you can always uninstall it.
To run a single hook use `pre-commit run --all-files <hook_id>`
For example just run the `codespell` hook:
`pre-commit run --all-files codespell`

View File

@ -22,8 +22,8 @@ This PR...
- [ ] Bug fix (non-breaking change which fixes an issue) - [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] Enhancement (improves an existing feature and functionality) - [ ] Enhancement (improves an existing feature and functionality)
- [ ] Cleanup (Code refactoring and cleanup, that may add test cases) - [ ] Cleanup (Code refactoring and cleanup, that may add test cases)
- [ ] Build/CI - [ ] build/CI
- [ ] Test (unit or integration test code) - [ ] test (unit or integration test code)
### Feature/Enhancement Scale or Bug Severity ### Feature/Enhancement Scale or Bug Severity
@ -40,8 +40,10 @@ This PR...
- [ ] Minor - [ ] Minor
- [ ] Trivial - [ ] Trivial
### Screenshots (if appropriate): ### Screenshots (if appropriate):
### How Has This Been Tested? ### How Has This Been Tested?
<!-- Please describe in detail how you tested your changes. --> <!-- Please describe in detail how you tested your changes. -->
@ -51,4 +53,5 @@ This PR...
<!-- see how your change affects other areas of the code, etc. --> <!-- see how your change affects other areas of the code, etc. -->
<!-- Please read the [CONTRIBUTING](https://github.com/apache/cloudstack/blob/main/CONTRIBUTING.md) document --> <!-- Please read the [CONTRIBUTING](https://github.com/apache/cloudstack/blob/main/CONTRIBUTING.md) document -->

View File

@ -1,54 +1,7 @@
<!-- # Apache CloudStack [![Build Status](https://github.com/apache/cloudstack/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/build.yml) [![UI Build](https://github.com/apache/cloudstack/actions/workflows/ui.yml/badge.svg)](https://github.com/apache/cloudstack/actions/workflows/ui.yml) [![License Check](https://github.com/apache/cloudstack/actions/workflows/rat.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/rat.yml) [![Simulator CI](https://github.com/apache/cloudstack/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/ci.yml) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apache_cloudstack&metric=alert_status)](https://sonarcloud.io/dashboard?id=apache_cloudstack) [![codecov](https://codecov.io/gh/apache/cloudstack/branch/main/graph/badge.svg)](https://codecov.io/gh/apache/cloudstack)
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Apache CloudStack
[![Build Status](https://github.com/apache/cloudstack/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/build.yml)
[![codecov](https://codecov.io/gh/apache/cloudstack/branch/main/graph/badge.svg)](https://codecov.io/gh/apache/cloudstack)
[![Docker CloudStack Simulator Status](https://github.com/apache/cloudstack/actions/workflows/docker-cloudstack-simulator.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/docker-cloudstack-simulator.yml)
[![License Check](https://github.com/apache/cloudstack/actions/workflows/rat.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/rat.yml)
[![Linter Status](https://github.com/apache/cloudstack/actions/workflows/linter.yml/badge.svg)](https://github.com/apache/cloudstack/actions/workflows/linter.yml)
[![Merge Conflict Checker Status](https://github.com/apache/cloudstack/actions/workflows/merge-conflict-checker.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/merge-conflict-checker.yml)
[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apache_cloudstack&metric=alert_status)](https://sonarcloud.io/dashboard?id=apache_cloudstack)
[![Simulator CI](https://github.com/apache/cloudstack/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/ci.yml)
[![UI Build](https://github.com/apache/cloudstack/actions/workflows/ui.yml/badge.svg?branch=main)](https://github.com/apache/cloudstack/actions/workflows/ui.yml)
[![Apache CloudStack](tools/logo/apache_cloudstack.png)](https://cloudstack.apache.org/) [![Apache CloudStack](tools/logo/apache_cloudstack.png)](https://cloudstack.apache.org/)
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Who Uses CloudStack?](#who-uses-cloudstack)
- [Demo](#demo)
- [Getting Started](#getting-started)
- [Getting Source Repository](#getting-source-repository)
- [Documentation](#documentation)
- [News and Events](#news-and-events)
- [Getting Involved and Contributing](#getting-involved-and-contributing)
- [Reporting Security Vulnerabilities](#reporting-security-vulnerabilities)
- [License](#license)
- [Notice of Cryptographic Software](#notice-of-cryptographic-software)
- [Star History](#star-history)
- [Contributors](#contributors)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
Apache CloudStack is open source software designed to deploy and manage large Apache CloudStack is open source software designed to deploy and manage large
networks of virtual machines, as a highly available, highly scalable networks of virtual machines, as a highly available, highly scalable
Infrastructure as a Service (IaaS) cloud computing platform. CloudStack is used Infrastructure as a Service (IaaS) cloud computing platform. CloudStack is used
@ -68,7 +21,7 @@ OVM and LXC containers.
Users can manage their cloud with an easy to use Web interface, command line Users can manage their cloud with an easy to use Web interface, command line
tools, and/or a full-featured query based API. tools, and/or a full-featured query based API.
For more information on Apache CloudStack, please visit the [website](https://cloudstack.apache.org) For more information on Apache CloudStack, please visit the [website](http://cloudstack.apache.org)
## Who Uses CloudStack? ## Who Uses CloudStack?
@ -125,10 +78,10 @@ via GitHub pull requests.
## Getting Involved and Contributing ## Getting Involved and Contributing
Interested in helping out with Apache CloudStack? Great! We welcome Interested in helping out with Apache CloudStack? Great! We welcome
participation from anybody willing to work [The Apache Way](https://theapacheway.com) and make a participation from anybody willing to work [The Apache Way](http://theapacheway.com) and make a
contribution. Note that you do not have to be a developer in order to contribute contribution. Note that you do not have to be a developer in order to contribute
to Apache CloudStack. We need folks to help with documentation, translation, to Apache CloudStack. We need folks to help with documentation, translation,
promotion etc. See our contribution [page](https://cloudstack.apache.org/contribute.html). promotion etc. See our contribution [page](http://cloudstack.apache.org/contribute.html).
If you are a frequent contributors, you can request to be added as collaborators If you are a frequent contributors, you can request to be added as collaborators
(see https://cwiki.apache.org/confluence/display/INFRA/Git+-+.asf.yaml+features#Git.asf.yamlfeatures-AssigningexternalcollaboratorswiththetriageroleonGitHub) (see https://cwiki.apache.org/confluence/display/INFRA/Git+-+.asf.yaml+features#Git.asf.yamlfeatures-AssigningexternalcollaboratorswiththetriageroleonGitHub)
@ -139,7 +92,7 @@ You may do so by sharing your GitHub users ID or raise a GitHub issue.
If you're interested in learning more or participating in the Apache CloudStack If you're interested in learning more or participating in the Apache CloudStack
project, the mailing lists are the best way to do that. While the project has project, the mailing lists are the best way to do that. While the project has
several communications channels, the [mailing lists](https://cloudstack.apache.org/mailing-lists.html) are the most active and the several communications channels, the [mailing lists](http://cloudstack.apache.org/mailing-lists.html) are the most active and the
official channels for making decisions about the project itself. official channels for making decisions about the project itself.
Mailing lists: Mailing lists:
@ -159,7 +112,7 @@ released version of CloudStack, please report it to `security@apache.org` with
details about the vulnerability, how it might be exploited, and any additional details about the vulnerability, how it might be exploited, and any additional
information that might be useful. information that might be useful.
For more details, please visit our security [page](https://cloudstack.apache.org/security.html). For more details, please visit our security [page](http://cloudstack.apache.org/security.html).
## License ## License
@ -207,11 +160,3 @@ The following provides more details on the included cryptographic software:
* CloudStack makes use of the Bouncy Castle general-purpose encryption library. * CloudStack makes use of the Bouncy Castle general-purpose encryption library.
* CloudStack can optionally interact with and control OpenSwan-based VPNs. * CloudStack can optionally interact with and control OpenSwan-based VPNs.
* CloudStack has a dependency on and makes use of JSch - a java SSH2 implementation. * CloudStack has a dependency on and makes use of JSch - a java SSH2 implementation.
## Star History
[![Apache CloudStack Star History](https://api.star-history.com/svg?repos=apache/cloudstack&type=Date)](https://www.star-history.com/#apache/cloudstack&Date)
## Contributors
[![Apache CloudStack Contributors](https://contrib.rocks/image?repo=apache/cloudstack&anon=0&max=500)](https://github.com/apache/cloudstack/graphs/contributors)

View File

@ -35,7 +35,7 @@ for pythonpath in (
from cloudutils.cloudException import CloudRuntimeException, CloudInternalException from cloudutils.cloudException import CloudRuntimeException, CloudInternalException
from cloudutils.utilities import initLoging, bash from cloudutils.utilities import initLoging, bash
from cloudutils.configFileOps import configFileOps from cloudutils.configFileOps import configFileOps
from cloudutils.globalEnv import globalEnv from cloudutils.globalEnv import globalEnv
from cloudutils.networkConfig import networkConfig from cloudutils.networkConfig import networkConfig
from cloudutils.syscfg import sysConfigFactory from cloudutils.syscfg import sysConfigFactory
@ -43,41 +43,35 @@ from cloudutils.serviceConfig import configureLibvirtConfig, configure_libvirt_t
from optparse import OptionParser from optparse import OptionParser
def getUserInputs(): def getUserInputs():
print("Welcome to the CloudStack Agent Setup:") print("Welcome to the CloudStack Agent Setup:")
cfo = configFileOps("@AGENTSYSCONFDIR@/agent.properties") cfo = configFileOps("@AGENTSYSCONFDIR@/agent.properties")
oldMgt = cfo.getEntry("host") oldMgt = cfo.getEntry("host")
mgtSvr = input( mgtSvr = input("Please input the Management Server Hostname/IP-Address:[%s]"%oldMgt)
"Please input the Management Server Hostname/IP-Address:[%s]" % oldMgt
)
if mgtSvr == "": if mgtSvr == "":
mgtSvr = oldMgt mgtSvr = oldMgt
try: try:
socket.getaddrinfo(mgtSvr, 443) socket.getaddrinfo(mgtSvr, 443)
except: except:
print( print("Failed to resolve %s. Please input a valid hostname or IP-Address."%mgtSvr)
"Failed to resolve %s. Please input a valid hostname or IP-Address."
% mgtSvr
)
exit(1) exit(1)
oldToken = cfo.getEntry("zone") oldToken = cfo.getEntry("zone")
zoneToken = input("Please input the Zone Id:[%s]" % oldToken) zoneToken = input("Please input the Zone Id:[%s]"%oldToken)
if zoneToken == "": if zoneToken == "":
zoneToken = oldToken zoneToken = oldToken
oldPod = cfo.getEntry("pod") oldPod = cfo.getEntry("pod")
podId = input("Please input the Pod Id:[%s]" % oldPod) podId = input("Please input the Pod Id:[%s]"%oldPod)
if podId == "": if podId == "":
podId = oldToken podId = oldToken
oldCluster = cfo.getEntry("cluster") oldCluster = cfo.getEntry("cluster")
clusterId = input("Please input the Cluster Id:[%s]" % oldCluster) clusterId = input("Please input the Cluster Id:[%s]"%oldCluster)
if clusterId == "": if clusterId == "":
clusterId = oldCluster clusterId = oldCluster
@ -85,20 +79,18 @@ def getUserInputs():
if oldHypervisor == "": if oldHypervisor == "":
oldHypervisor = "kvm" oldHypervisor = "kvm"
hypervisor = input("Please input the Hypervisor type kvm/lxc:[%s]" % oldHypervisor) hypervisor = input("Please input the Hypervisor type kvm/lxc:[%s]"%oldHypervisor)
if hypervisor == "": if hypervisor == "":
hypervisor = oldHypervisor hypervisor = oldHypervisor
try: try:
defaultNic = networkConfig.getDefaultNetwork() defaultNic = networkConfig.getDefaultNetwork()
except: except:
print( print("Failed to get default route. Please configure your network to have a default route")
"Failed to get default route. Please configure your network to have a default route"
)
exit(1) exit(1)
defNic = defaultNic.name defNic = defaultNic.name
network = input("Please choose which network used to create VM:[%s]" % defNic) network = input("Please choose which network used to create VM:[%s]"%defNic)
if network == "": if network == "":
if defNic == "": if defNic == "":
print("You need to specify one of Nic or bridge on your system") print("You need to specify one of Nic or bridge on your system")
@ -108,8 +100,7 @@ def getUserInputs():
return [mgtSvr, zoneToken, network, podId, clusterId, hypervisor] return [mgtSvr, zoneToken, network, podId, clusterId, hypervisor]
if __name__ == '__main__':
if __name__ == "__main__":
initLoging("@AGENTLOGDIR@/setup.log") initLoging("@AGENTLOGDIR@/setup.log")
glbEnv = globalEnv() glbEnv = globalEnv()
@ -117,23 +108,13 @@ if __name__ == "__main__":
glbEnv.agentMode = "Agent" glbEnv.agentMode = "Agent"
parser = OptionParser() parser = OptionParser()
parser.add_option("-a", action="store_true", dest="auto", help="auto mode") parser.add_option("-a", action="store_true", dest="auto", help="auto mode")
parser.add_option( parser.add_option("-m", "--host", dest="mgt", help="Management server hostname or IP-Address")
"-m", "--host", dest="mgt", help="Management server hostname or IP-Address"
)
parser.add_option("-z", "--zone", dest="zone", help="zone id") parser.add_option("-z", "--zone", dest="zone", help="zone id")
parser.add_option("-p", "--pod", dest="pod", help="pod id") parser.add_option("-p", "--pod", dest="pod", help="pod id")
parser.add_option("-c", "--cluster", dest="cluster", help="cluster id") parser.add_option("-c", "--cluster", dest="cluster", help="cluster id")
parser.add_option( parser.add_option("-t", "--hypervisor", default="kvm", dest="hypervisor", help="hypervisor type")
"-t", "--hypervisor", default="kvm", dest="hypervisor", help="hypervisor type"
)
parser.add_option("-g", "--guid", dest="guid", help="guid") parser.add_option("-g", "--guid", dest="guid", help="guid")
parser.add_option( parser.add_option("-s", action="store_true", default=False, dest="secure", help="Secure and enable TLS for libvirtd")
"-s",
action="store_true",
default=False,
dest="secure",
help="Secure and enable TLS for libvirtd",
)
parser.add_option("--pubNic", dest="pubNic", help="Public traffic interface") parser.add_option("--pubNic", dest="pubNic", help="Public traffic interface")
parser.add_option("--prvNic", dest="prvNic", help="Private traffic interface") parser.add_option("--prvNic", dest="prvNic", help="Private traffic interface")
parser.add_option("--guestNic", dest="guestNic", help="Guest traffic interface") parser.add_option("--guestNic", dest="guestNic", help="Guest traffic interface")
@ -159,15 +140,15 @@ if __name__ == "__main__":
glbEnv.pod = userInputs[3] glbEnv.pod = userInputs[3]
glbEnv.cluster = userInputs[4] glbEnv.cluster = userInputs[4]
glbEnv.hypervisor = userInputs[5] glbEnv.hypervisor = userInputs[5]
# generate UUID #generate UUID
glbEnv.uuid = old_config.getEntry("guid") glbEnv.uuid = old_config.getEntry("guid")
if glbEnv.uuid == "": if glbEnv.uuid == "":
glbEnv.uuid = bash("uuidgen").getStdout() glbEnv.uuid = bash("uuidgen").getStdout()
else: else:
for para, value in list(options.__dict__.items()): for para, value in list(options.__dict__.items()):
if value is None: if value is None:
print("Missing operand:%s" % para) print("Missing operand:%s"%para)
print("Try %s --help for more information" % sys.argv[0]) print("Try %s --help for more information"%sys.argv[0])
sys.exit(1) sys.exit(1)
glbEnv.uuid = options.guid glbEnv.uuid = options.guid
@ -187,7 +168,7 @@ if __name__ == "__main__":
try: try:
syscfg.config() syscfg.config()
print("CloudStack Agent setup is done!") print("CloudStack Agent setup is done!")
except (CloudRuntimeException, CloudInternalException) as e: except (CloudRuntimeException,CloudInternalException) as e:
print(e) print(e)
print("Try to restore your system:") print("Try to restore your system:")
try: try:

View File

@ -209,13 +209,12 @@ hypervisor.type=kvm
# the management server would send. # the management server would send.
# In case of arm64 (aarch64), this will change the machine type to 'virt' and # In case of arm64 (aarch64), this will change the machine type to 'virt' and
# adds a SCSI and a USB controller in the domain xml. # adds a SCSI and a USB controller in the domain xml.
# Possible values: x86_64 | aarch64 | s390x # Possible values: x86_64 | aarch64
# If null (default), defaults to the VM's OS architecture # If null (default), defaults to the VM's OS architecture
#guest.cpu.arch= #guest.cpu.arch=
# Specifies required CPU features for end-user and system VMs. # This param will require CPU features on the CPU section.
# These features must be present on the host CPU for VM deployment. # The features listed in this property must be separated by a blank space (e.g.: vmx vme)
# Multiple features should be separated by whitespace (e.g.: vmx vme).
#guest.cpu.features= #guest.cpu.features=
# Disables memory ballooning on VM guests for overcommit. # Disables memory ballooning on VM guests for overcommit.
@ -442,33 +441,3 @@ iscsi.session.cleanup.enabled=false
# Wait(in seconds) during agent reconnections. When no value is set then default value of 5s will be used # Wait(in seconds) during agent reconnections. When no value is set then default value of 5s will be used
#backoff.seconds= #backoff.seconds=
# Timeout (in seconds) to wait for the snapshot reversion to complete.
# revert.snapshot.timeout=10800
# Timeout (in seconds) to wait for the incremental snapshot to complete.
# incremental.snapshot.timeout=10800
# If set to true, creates VMs as full clones of their templates on KVM hypervisor. Creates as linked clones otherwise.
# create.full.clone=false
# Instance conversion TMPDIR env var
#convert.instance.env.tmpdir=
# Instance conversion VIRT_V2V_TMPDIR env var
#convert.instance.env.virtv2v.tmpdir=
# Time, in seconds, to wait before retrying to rebase during the incremental snapshot process.
# incremental.snapshot.retry.rebase.wait=60
# Path to the VDDK library directory for VMware to KVM conversion via VDDK,
# passed to virt-v2v as -io vddk-libdir=<path>
#vddk.lib.dir=
# Ordered VDDK transport preference for VMware to KVM conversion via VDDK, passed as
# -io vddk-transports=<value> to virt-v2v. Example: nbd:nbdssl
#vddk.transports=
# Optional vCenter SHA1 thumbprint for VMware to KVM conversion via VDDK, passed as
# -io vddk-thumbprint=<value>. If unset, CloudStack computes it on the KVM host via openssl.
#vddk.thumbprint=

View File

@ -1,24 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Configuration file for UEFI
guest.nvram.template.legacy=@GUESTNVRAMTEMPLATELEGACY@
guest.loader.legacy=@GUESTLOADERLEGACY@
guest.nvram.template.secure=@GUESTNVRAMTEMPLATESECURE@
guest.loader.secure=@GUESTLOADERSECURE@
guest.nvram.path=@GUESTNVRAMPATH@

View File

@ -24,7 +24,7 @@
<parent> <parent>
<groupId>org.apache.cloudstack</groupId> <groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack</artifactId> <artifactId>cloudstack</artifactId>
<version>4.23.0.0-SNAPSHOT</version> <version>4.20.3.0</version>
</parent> </parent>
<dependencies> <dependencies>
<dependency> <dependency>

View File

@ -26,8 +26,6 @@ import java.net.Socket;
import java.net.UnknownHostException; import java.net.UnknownHostException;
import java.nio.channels.ClosedChannelException; import java.nio.channels.ClosedChannelException;
import java.nio.charset.Charset; import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -59,6 +57,7 @@ import org.apache.cloudstack.utils.security.KeyStoreUtils;
import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.io.FileUtils; import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.ThreadContext; import org.apache.logging.log4j.ThreadContext;
@ -70,8 +69,6 @@ import com.cloud.agent.api.Command;
import com.cloud.agent.api.CronCommand; import com.cloud.agent.api.CronCommand;
import com.cloud.agent.api.MaintainAnswer; import com.cloud.agent.api.MaintainAnswer;
import com.cloud.agent.api.MaintainCommand; import com.cloud.agent.api.MaintainCommand;
import com.cloud.agent.api.MigrateAgentConnectionAnswer;
import com.cloud.agent.api.MigrateAgentConnectionCommand;
import com.cloud.agent.api.PingAnswer; import com.cloud.agent.api.PingAnswer;
import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingCommand;
import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.ReadyCommand;
@ -87,7 +84,6 @@ import com.cloud.resource.ResourceStatusUpdater;
import com.cloud.resource.ServerResource; import com.cloud.resource.ServerResource;
import com.cloud.utils.NumbersUtil; import com.cloud.utils.NumbersUtil;
import com.cloud.utils.PropertiesUtil; import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.StringUtils;
import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.concurrency.NamedThreadFactory;
import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.exception.NioConnectionException; import com.cloud.utils.exception.NioConnectionException;
@ -341,8 +337,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
logger.info("Attempted to connect to the server, but received an unexpected exception, trying again...", e); logger.info("Attempted to connect to the server, but received an unexpected exception, trying again...", e);
} }
} }
shell.updateConnectedHost(((NioClient)connection).getHost()); shell.updateConnectedHost();
scavengeOldAgentObjects(); scavengeOldAgentObjects();
} }
public void stop(final String reason, final String detail) { public void stop(final String reason, final String detail) {
@ -452,30 +449,22 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
certExecutor.schedule(new PostCertificateRenewalTask(this), 5, TimeUnit.SECONDS); certExecutor.schedule(new PostCertificateRenewalTask(this), 5, TimeUnit.SECONDS);
} }
private void scheduleHostLBCheckerTask(final String lbAlgorithm, final long checkInterval) { private void scheduleHostLBCheckerTask(final long checkInterval) {
String name = "HostLBCheckerTask"; String name = "HostLBCheckerTask";
if (hostLbCheckExecutor != null && !hostLbCheckExecutor.isShutdown()) { if (hostLbCheckExecutor != null && !hostLbCheckExecutor.isShutdown()) {
logger.info("Shutting down the preferred host checker task {}", name);
hostLbCheckExecutor.shutdown(); hostLbCheckExecutor.shutdown();
try { try {
if (!hostLbCheckExecutor.awaitTermination(1, TimeUnit.SECONDS)) { if (!hostLbCheckExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
hostLbCheckExecutor.shutdownNow(); hostLbCheckExecutor.shutdownNow();
} }
} catch (InterruptedException e) { } catch (InterruptedException e) {
logger.debug("Forcing the preferred host checker task {} shutdown as it did not shutdown in the desired time due to: {}", logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}",
name, e.getMessage()); name, e.getMessage());
hostLbCheckExecutor.shutdownNow(); hostLbCheckExecutor.shutdownNow();
} }
} }
if (checkInterval > 0L) { if (checkInterval > 0L) {
if ("shuffle".equalsIgnoreCase(lbAlgorithm)) { logger.info("Scheduling preferred host task with host.lb.interval={}ms", checkInterval);
logger.info("Scheduling the preferred host checker task to trigger once (to apply lb algorithm '{}') after host.lb.interval={} ms", lbAlgorithm, checkInterval);
hostLbCheckExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name)));
hostLbCheckExecutor.schedule(new PreferredHostCheckerTask(), checkInterval, TimeUnit.MILLISECONDS);
return;
}
logger.info("Scheduling a recurring preferred host checker task with host.lb.interval={} ms", checkInterval);
hostLbCheckExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); hostLbCheckExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name)));
hostLbCheckExecutor.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval, hostLbCheckExecutor.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval,
TimeUnit.MILLISECONDS); TimeUnit.MILLISECONDS);
@ -549,10 +538,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
} }
public void sendStartup(final Link link) { public void sendStartup(final Link link) {
sendStartup(link, false);
}
public void sendStartup(final Link link, boolean transfer) {
final StartupCommand[] startup = serverResource.initialize(); final StartupCommand[] startup = serverResource.initialize();
if (startup != null) { if (startup != null) {
final String msHostList = shell.getPersistentProperty(null, "host"); final String msHostList = shell.getPersistentProperty(null, "host");
@ -560,7 +545,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
for (int i = 0; i < startup.length; i++) { for (int i = 0; i < startup.length; i++) {
setupStartupCommand(startup[i]); setupStartupCommand(startup[i]);
startup[i].setMSHostList(msHostList); startup[i].setMSHostList(msHostList);
startup[i].setConnectionTransferred(transfer);
commands[i] = startup[i]; commands[i] = startup[i];
} }
final Request request = new Request(id != null ? id : -1, -1, commands, false, false); final Request request = new Request(id != null ? id : -1, -1, commands, false, false);
@ -623,46 +607,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
return new ServerHandler(type, link, data); return new ServerHandler(type, link, data);
} }
protected void reconnect(final Link link) {
reconnect(link, null, false);
}
protected void reconnect(final Link link, String preferredMSHost, boolean forTransfer) {
if (!(forTransfer || reconnectAllowed)) {
logger.debug("Reconnect requested but it is not allowed {}", () -> getLinkLog(link));
return;
}
cancelStartupTask();
closeAndTerminateLink(link);
closeAndTerminateLink(this.link);
setLink(null);
cancelTasks();
serverResource.disconnected();
logger.info("Lost connection to host: {}. Attempting reconnection while we still have {} commands in progress.", shell.getConnectedHost(), commandsInProgress.get());
stopAndCleanupConnection(true);
String host = preferredMSHost;
if (org.apache.commons.lang3.StringUtils.isBlank(host)) {
host = shell.getNextHost();
}
List<String> avoidMSHostList = shell.getAvoidHosts();
do {
if (CollectionUtils.isEmpty(avoidMSHostList) || !avoidMSHostList.contains(host)) {
connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), shell.getSslHandshakeTimeout(), this);
logger.info("Reconnecting to host: {}", host);
try {
connection.start();
} catch (final NioConnectionException e) {
logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e);
stopAndCleanupConnection(false);
}
}
shell.getBackoffAlgorithm().waitBeforeRetry();
host = shell.getNextHost();
} while (!connection.isStartup());
shell.updateConnectedHost(((NioClient)connection).getHost());
logger.info("Connected to the host: {}", shell.getConnectedHost());
}
protected void closeAndTerminateLink(final Link link) { protected void closeAndTerminateLink(final Link link) {
if (link == null) { if (link == null) {
return; return;
@ -689,6 +633,35 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
} while (connection.isStartup()); } while (connection.isStartup());
} }
protected void reconnect(final Link link) {
if (!reconnectAllowed) {
logger.debug("Reconnect requested but it is not allowed {}", () -> getLinkLog(link));
return;
}
cancelStartupTask();
closeAndTerminateLink(link);
closeAndTerminateLink(this.link);
setLink(null);
cancelTasks();
serverResource.disconnected();
logger.info("Lost connection to host: {}. Attempting reconnection while we still have {} commands in progress.", shell.getConnectedHost(), commandsInProgress.get());
stopAndCleanupConnection(true);
do {
final String host = shell.getNextHost();
connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), shell.getSslHandshakeTimeout(), this);
logger.info("Reconnecting to host: {}", host);
try {
connection.start();
} catch (final NioConnectionException e) {
logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e);
stopAndCleanupConnection(false);
}
shell.getBackoffAlgorithm().waitBeforeRetry();
} while (!connection.isStartup());
shell.updateConnectedHost();
logger.info("Connected to the host: {}", shell.getConnectedHost());
}
public void processStartupAnswer(final Answer answer, final Response response, final Link link) { public void processStartupAnswer(final Answer answer, final Response response, final Link link) {
boolean answerValid = cancelStartupTask(); boolean answerValid = cancelStartupTask();
final StartupAnswer startup = (StartupAnswer)answer; final StartupAnswer startup = (StartupAnswer)answer;
@ -735,9 +708,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
final Command cmd = cmds[i]; final Command cmd = cmds[i];
Answer answer; Answer answer;
try { try {
if (cmd.getContextParam("logid") != null) {
ThreadContext.put("logcontextid", cmd.getContextParam("logid"));
}
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
if (!requestLogged) // ensures request is logged only once per method call if (!requestLogged) // ensures request is logged only once per method call
{ {
@ -799,17 +769,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
} }
} else if (cmd instanceof SetupMSListCommand) { } else if (cmd instanceof SetupMSListCommand) {
answer = setupManagementServerList((SetupMSListCommand) cmd); answer = setupManagementServerList((SetupMSListCommand) cmd);
} else if (cmd instanceof MigrateAgentConnectionCommand) {
answer = migrateAgentToOtherMS((MigrateAgentConnectionCommand) cmd);
} else { } else {
if (cmd instanceof ReadyCommand) { if (cmd instanceof ReadyCommand) {
processReadyCommand(cmd); processReadyCommand(cmd);
} }
commandsInProgress.incrementAndGet(); commandsInProgress.incrementAndGet();
try { try {
if (cmd.isReconcile()) {
cmd.setRequestSequence(request.getSequence());
}
answer = serverResource.executeRequest(cmd); answer = serverResource.executeRequest(cmd);
} finally { } finally {
commandsInProgress.decrementAndGet(); commandsInProgress.decrementAndGet();
@ -935,7 +900,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
return new SetupCertificateAnswer(true); return new SetupCertificateAnswer(true);
} }
private void processManagementServerList(final List<String> msList, final List<String> avoidMsList, final String lbAlgorithm, final Long lbCheckInterval, final boolean triggerHostLB) { private void processManagementServerList(final List<String> msList, final String lbAlgorithm, final Long lbCheckInterval) {
if (CollectionUtils.isNotEmpty(msList) && StringUtils.isNotEmpty(lbAlgorithm)) { if (CollectionUtils.isNotEmpty(msList) && StringUtils.isNotEmpty(lbAlgorithm)) {
try { try {
final String newMSHosts = String.format("%s%s%s", com.cloud.utils.StringUtils.toCSVList(msList), IAgentShell.hostLbAlgorithmSeparator, lbAlgorithm); final String newMSHosts = String.format("%s%s%s", com.cloud.utils.StringUtils.toCSVList(msList), IAgentShell.hostLbAlgorithmSeparator, lbAlgorithm);
@ -947,73 +912,18 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
throw new CloudRuntimeException("Could not persist received management servers list", e); throw new CloudRuntimeException("Could not persist received management servers list", e);
} }
} }
shell.setAvoidHosts(avoidMsList); if ("shuffle".equals(lbAlgorithm)) {
if (triggerHostLB) { scheduleHostLBCheckerTask(0);
logger.info("Triggering the preferred host checker task now"); } else {
ScheduledExecutorService hostLbExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HostLB-Executor")); scheduleHostLBCheckerTask(shell.getLbCheckerInterval(lbCheckInterval));
hostLbExecutor.schedule(new PreferredHostCheckerTask(), 0, TimeUnit.MILLISECONDS);
hostLbExecutor.shutdown();
} }
scheduleHostLBCheckerTask(lbAlgorithm, shell.getLbCheckerInterval(lbCheckInterval));
} }
private Answer setupManagementServerList(final SetupMSListCommand cmd) { private Answer setupManagementServerList(final SetupMSListCommand cmd) {
processManagementServerList(cmd.getMsList(), cmd.getAvoidMsList(), cmd.getLbAlgorithm(), cmd.getLbCheckInterval(), cmd.getTriggerHostLb()); processManagementServerList(cmd.getMsList(), cmd.getLbAlgorithm(), cmd.getLbCheckInterval());
return new SetupMSListAnswer(true); return new SetupMSListAnswer(true);
} }
private Answer migrateAgentToOtherMS(final MigrateAgentConnectionCommand cmd) {
try {
if (CollectionUtils.isNotEmpty(cmd.getMsList())) {
processManagementServerList(cmd.getMsList(), cmd.getAvoidMsList(), cmd.getLbAlgorithm(), cmd.getLbCheckInterval(), false);
}
ScheduledExecutorService migrateAgentConnectionService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("MigrateAgentConnection-Job"));
migrateAgentConnectionService.schedule(() -> {
migrateAgentConnection(cmd.getAvoidMsList());
}, 3, TimeUnit.SECONDS);
migrateAgentConnectionService.shutdown();
} catch (Exception e) {
String errMsg = "Migrate agent connection failed, due to " + e.getMessage();
logger.debug(errMsg, e);
return new MigrateAgentConnectionAnswer(errMsg);
}
return new MigrateAgentConnectionAnswer(true);
}
private void migrateAgentConnection(List<String> avoidMsList) {
final String[] msHosts = shell.getHosts();
if (msHosts == null || msHosts.length < 1) {
throw new CloudRuntimeException("Management Server hosts empty, not properly configured in agent");
}
List<String> msHostsList = new ArrayList<>(Arrays.asList(msHosts));
msHostsList.removeAll(avoidMsList);
if (msHostsList.isEmpty() || StringUtils.isEmpty(msHostsList.get(0))) {
throw new CloudRuntimeException("No other Management Server hosts to migrate");
}
String preferredMSHost = null;
for (String msHost : msHostsList) {
try (final Socket socket = new Socket()) {
socket.connect(new InetSocketAddress(msHost, shell.getPort()), 5000);
preferredMSHost = msHost;
break;
} catch (final IOException e) {
throw new CloudRuntimeException("Management server host: " + msHost + " is not reachable, to migrate connection");
}
}
if (preferredMSHost == null) {
throw new CloudRuntimeException("Management server host(s) are not reachable, to migrate connection");
}
logger.debug("Management server host " + preferredMSHost + " is found to be reachable, trying to reconnect");
shell.resetHostCounter();
shell.setAvoidHosts(avoidMsList);
shell.setConnectionTransfer(true);
reconnect(link, preferredMSHost, true);
}
public void processResponse(final Response response, final Link link) { public void processResponse(final Response response, final Link link) {
final Answer answer = response.getAnswer(); final Answer answer = response.getAnswer();
logger.debug("Received response: {}", response.toString()); logger.debug("Received response: {}", response.toString());
@ -1024,21 +934,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
for (final IAgentControlListener listener : controlListeners) { for (final IAgentControlListener listener : controlListeners) {
listener.processControlResponse(response, (AgentControlAnswer)answer); listener.processControlResponse(response, (AgentControlAnswer)answer);
} }
} else if (answer instanceof PingAnswer) { } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && reconnectAllowed) {
processPingAnswer((PingAnswer) answer);
} else {
updateLastPingResponseTime();
}
}
private void processPingAnswer(final PingAnswer answer) {
if ((answer.isSendStartup()) && reconnectAllowed) {
logger.info("Management server requested startup command to reinitialize the agent"); logger.info("Management server requested startup command to reinitialize the agent");
sendStartup(link); sendStartup(link);
} else { } else {
serverResource.processPingAnswer((PingAnswer) answer); updateLastPingResponseTime();
} }
shell.setAvoidHosts(answer.getAvoidMsList());
} }
public void processReadyCommand(final Command cmd) { public void processReadyCommand(final Command cmd) {
@ -1057,7 +958,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
} }
verifyAgentArch(ready.getArch()); verifyAgentArch(ready.getArch());
processManagementServerList(ready.getMsHostList(), ready.getAvoidMsHostList(), ready.getLbAlgorithm(), ready.getLbCheckInterval(), false); processManagementServerList(ready.getMsHostList(), ready.getLbAlgorithm(), ready.getLbCheckInterval());
logger.info("Ready command is processed for agent [id: {}, uuid: {}, name: {}]", getId(), getUuid(), getName()); logger.info("Ready command is processed for agent [id: {}, uuid: {}, name: {}]", getId(), getUuid(), getName());
} }
@ -1103,9 +1004,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
Answer answer = null; Answer answer = null;
commandsInProgress.incrementAndGet(); commandsInProgress.incrementAndGet();
try { try {
if (command.isReconcile()) {
command.setRequestSequence(req.getSequence());
}
answer = serverResource.executeRequest(command); answer = serverResource.executeRequest(command);
} finally { } finally {
commandsInProgress.decrementAndGet(); commandsInProgress.decrementAndGet();
@ -1229,12 +1127,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
public class WatchTask implements Runnable { public class WatchTask implements Runnable {
protected Request _request; protected Request _request;
protected Agent _agent; protected Agent _agent;
protected Link link; protected Link _link;
public WatchTask(final Link link, final Request request, final Agent agent) { public WatchTask(final Link link, final Request request, final Agent agent) {
super(); super();
_request = request; _request = request;
this.link = link; _link = link;
_agent = agent; _agent = agent;
} }
@ -1243,9 +1141,9 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
logger.trace("Scheduling {}", (_request instanceof Response ? "Ping" : "Watch Task")); logger.trace("Scheduling {}", (_request instanceof Response ? "Ping" : "Watch Task"));
try { try {
if (_request instanceof Response) { if (_request instanceof Response) {
outRequestHandler.submit(new ServerHandler(Task.Type.OTHER, link, _request)); outRequestHandler.submit(new ServerHandler(Task.Type.OTHER, _link, _request));
} else { } else {
link.schedule(new ServerHandler(Task.Type.OTHER, link, _request)); _link.schedule(new ServerHandler(Task.Type.OTHER, _link, _request));
} }
} catch (final ClosedChannelException e) { } catch (final ClosedChannelException e) {
logger.warn("Unable to schedule task because channel is closed"); logger.warn("Unable to schedule task because channel is closed");
@ -1254,12 +1152,12 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
} }
public class StartupTask implements Runnable { public class StartupTask implements Runnable {
protected Link link; protected Link _link;
private final AtomicBoolean cancelled = new AtomicBoolean(false); private final AtomicBoolean cancelled = new AtomicBoolean(false);
public StartupTask(final Link link) { public StartupTask(final Link link) {
logger.debug("Startup task created"); logger.debug("Startup task created");
this.link = link; _link = link;
} }
public boolean cancel() { public boolean cancel() {
@ -1277,8 +1175,8 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
logger.info("The running startup command is now invalid. Attempting reconnect"); logger.info("The running startup command is now invalid. Attempting reconnect");
startupTask.set(null); startupTask.set(null);
startupWait = DEFAULT_STARTUP_WAIT * 2; startupWait = DEFAULT_STARTUP_WAIT * 2;
logger.debug("Executing reconnect from task - {}", () -> getLinkLog(link)); logger.debug("Executing reconnect from task - {}", () -> getLinkLog(_link));
reconnect(link); reconnect(_link);
} }
} }
} }
@ -1311,8 +1209,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
if (task.getType() == Task.Type.CONNECT) { if (task.getType() == Task.Type.CONNECT) {
shell.getBackoffAlgorithm().reset(); shell.getBackoffAlgorithm().reset();
setLink(task.getLink()); setLink(task.getLink());
sendStartup(task.getLink(), shell.isConnectionTransfer()); sendStartup(task.getLink());
shell.setConnectionTransfer(false);
} else if (task.getType() == Task.Type.DATA) { } else if (task.getType() == Task.Type.DATA) {
Request request; Request request;
try { try {
@ -1322,6 +1219,7 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
processResponse((Response)request, task.getLink()); processResponse((Response)request, task.getLink());
} else { } else {
//put the requests from mgt server into another thread pool, as the request may take a longer time to finish. Don't block the NIO main thread pool //put the requests from mgt server into another thread pool, as the request may take a longer time to finish. Don't block the NIO main thread pool
//processRequest(request, task.getLink());
requestHandler.submit(new AgentRequestHandler(getType(), getLink(), request)); requestHandler.submit(new AgentRequestHandler(getType(), getLink(), request));
} }
} catch (final ClassNotFoundException e) { } catch (final ClassNotFoundException e) {
@ -1337,7 +1235,6 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
Thread.sleep(5000); Thread.sleep(5000);
} catch (InterruptedException e) { } catch (InterruptedException e) {
} }
shell.setConnectionTransfer(false);
logger.debug("Executing disconnect task - {} and reconnecting", () -> getLinkLog(task.getLink())); logger.debug("Executing disconnect task - {} and reconnecting", () -> getLinkLog(task.getLink()));
reconnect(task.getLink()); reconnect(task.getLink());
} else if (task.getType() == Task.Type.OTHER) { } else if (task.getType() == Task.Type.OTHER) {
@ -1407,26 +1304,26 @@ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater
if (msList == null || msList.length < 1) { if (msList == null || msList.length < 1) {
return; return;
} }
final String preferredMSHost = msList[0]; final String preferredHost = msList[0];
final String connectedHost = shell.getConnectedHost(); final String connectedHost = shell.getConnectedHost();
logger.debug("Running preferred host checker task, connected host={}, preferred host={}", logger.debug("Running preferred host checker task, connected host={}, preferred host={}",
connectedHost, preferredMSHost); connectedHost, preferredHost);
if (preferredMSHost == null || preferredMSHost.equals(connectedHost) || link == null) { if (preferredHost == null || preferredHost.equals(connectedHost) || link == null) {
return; return;
} }
boolean isHostUp = false; boolean isHostUp = false;
try (final Socket socket = new Socket()) { try (final Socket socket = new Socket()) {
socket.connect(new InetSocketAddress(preferredMSHost, shell.getPort()), 5000); socket.connect(new InetSocketAddress(preferredHost, shell.getPort()), 5000);
isHostUp = true; isHostUp = true;
} catch (final IOException e) { } catch (final IOException e) {
logger.debug("Host: {} is not reachable", preferredMSHost); logger.debug("Host: {} is not reachable", preferredHost);
} }
if (isHostUp && link != null && commandsInProgress.get() == 0) { if (isHostUp && link != null && commandsInProgress.get() == 0) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("Preferred host {} is found to be reachable, trying to reconnect", preferredMSHost); logger.debug("Preferred host {} is found to be reachable, trying to reconnect", preferredHost);
} }
shell.resetHostCounter(); shell.resetHostCounter();
reconnect(link, preferredMSHost, false); reconnect(link);
} }
} catch (Throwable t) { } catch (Throwable t) {
logger.error("Error caught while attempting to connect to preferred host", t); logger.error("Error caught while attempting to connect to preferred host", t);

View File

@ -66,7 +66,6 @@ public class AgentShell implements IAgentShell, Daemon {
private String _zone; private String _zone;
private String _pod; private String _pod;
private String _host; private String _host;
private List<String> _avoidHosts;
private String _privateIp; private String _privateIp;
private int _port; private int _port;
private int _proxyPort; private int _proxyPort;
@ -77,9 +76,9 @@ public class AgentShell implements IAgentShell, Daemon {
private volatile boolean _exit = false; private volatile boolean _exit = false;
private int _pingRetries; private int _pingRetries;
private final List<Agent> _agents = new ArrayList<Agent>(); private final List<Agent> _agents = new ArrayList<Agent>();
private String hostToConnect;
private String connectedHost; private String connectedHost;
private Long preferredHostCheckInterval; private Long preferredHostCheckInterval;
private boolean connectionTransfer = false;
protected AgentProperties agentProperties = new AgentProperties(); protected AgentProperties agentProperties = new AgentProperties();
public AgentShell() { public AgentShell() {
@ -121,7 +120,7 @@ public class AgentShell implements IAgentShell, Daemon {
if (_hostCounter >= hosts.length) { if (_hostCounter >= hosts.length) {
_hostCounter = 0; _hostCounter = 0;
} }
String hostToConnect = hosts[_hostCounter % hosts.length]; hostToConnect = hosts[_hostCounter % hosts.length];
_hostCounter++; _hostCounter++;
return hostToConnect; return hostToConnect;
} }
@ -143,10 +142,11 @@ public class AgentShell implements IAgentShell, Daemon {
} }
@Override @Override
public void updateConnectedHost(String connectedHost) { public void updateConnectedHost() {
this.connectedHost = connectedHost; connectedHost = hostToConnect;
} }
@Override @Override
public void resetHostCounter() { public void resetHostCounter() {
_hostCounter = 0; _hostCounter = 0;
@ -165,16 +165,6 @@ public class AgentShell implements IAgentShell, Daemon {
} }
} }
@Override
public void setAvoidHosts(List<String> avoidHosts) {
_avoidHosts = avoidHosts;
}
@Override
public List<String> getAvoidHosts() {
return _avoidHosts;
}
@Override @Override
public String getPrivateIp() { public String getPrivateIp() {
return _privateIp; return _privateIp;
@ -227,14 +217,6 @@ public class AgentShell implements IAgentShell, Daemon {
_storage.persist(name, value); _storage.persist(name, value);
} }
public boolean isConnectionTransfer() {
return connectionTransfer;
}
public void setConnectionTransfer(boolean connectionTransfer) {
this.connectionTransfer = connectionTransfer;
}
void loadProperties() throws ConfigurationException { void loadProperties() throws ConfigurationException {
final File file = PropertiesUtil.findConfigFile("agent.properties"); final File file = PropertiesUtil.findConfigFile("agent.properties");

View File

@ -16,7 +16,6 @@
// under the License. // under the License.
package com.cloud.agent; package com.cloud.agent;
import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Properties; import java.util.Properties;
@ -64,21 +63,13 @@ public interface IAgentShell {
String[] getHosts(); String[] getHosts();
void setAvoidHosts(List<String> hosts);
List<String> getAvoidHosts();
long getLbCheckerInterval(Long receivedLbInterval); long getLbCheckerInterval(Long receivedLbInterval);
void updateConnectedHost(String connectedHost); void updateConnectedHost();
String getConnectedHost(); String getConnectedHost();
void launchNewAgent(ServerResource resource) throws ConfigurationException; void launchNewAgent(ServerResource resource) throws ConfigurationException;
boolean isConnectionTransfer();
void setConnectionTransfer(boolean connectionTransfer);
Integer getSslHandshakeTimeout(); Integer getSslHandshakeTimeout();
} }

View File

@ -117,7 +117,7 @@ public class AgentProperties{
/** /**
* Local storage path.<br> * Local storage path.<br>
* This property allows multiple values to be entered in a single String. The different values must be separated by commas.<br> * This property allows multiple values to be entered in a single String. The differente values must be separated by commas.<br>
* Data type: String.<br> * Data type: String.<br>
* Default value: <code>/var/lib/libvirt/images/</code> * Default value: <code>/var/lib/libvirt/images/</code>
*/ */
@ -134,7 +134,7 @@ public class AgentProperties{
/** /**
* MANDATORY: The UUID for the local storage pool.<br> * MANDATORY: The UUID for the local storage pool.<br>
* This property allows multiple values to be entered in a single String. The different values must be separated by commas.<br> * This property allows multiple values to be entered in a single String. The differente values must be separated by commas.<br>
* Data type: String.<br> * Data type: String.<br>
* Default value: <code>null</code> * Default value: <code>null</code>
*/ */
@ -155,14 +155,6 @@ public class AgentProperties{
*/ */
public static final Property<Integer> CMDS_TIMEOUT = new Property<>("cmds.timeout", 7200); public static final Property<Integer> CMDS_TIMEOUT = new Property<>("cmds.timeout", 7200);
/**
* The timeout (in seconds) for the snapshot merge operation, mainly used for classic volume snapshots and disk-only VM snapshots on file-based storage.<br>
* This configuration is only considered if libvirt.events.enabled is also true. <br>
* Data type: Integer.<br>
* Default value: <code>259200</code>
*/
public static final Property<Integer> QCOW2_DELTA_MERGE_TIMEOUT = new Property<>("qcow2.delta.merge.timeout", 60 * 60 * 72);
/** /**
* This parameter sets the VM migration speed (in mbps). The default value is -1,<br> * This parameter sets the VM migration speed (in mbps). The default value is -1,<br>
* which means that the agent will try to guess the speed of the guest network and consume all possible bandwidth.<br> * which means that the agent will try to guess the speed of the guest network and consume all possible bandwidth.<br>
@ -221,15 +213,6 @@ public class AgentProperties{
*/ */
public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_XML_TRANSFORMER_SCRIPT = new Property<>("agent.hooks.libvirt_vm_xml_transformer.script", "libvirt-vm-xml-transformer.groovy"); public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_XML_TRANSFORMER_SCRIPT = new Property<>("agent.hooks.libvirt_vm_xml_transformer.script", "libvirt-vm-xml-transformer.groovy");
/**
* This property is used with the agent.hooks.basedir property to define the Libvirt VM XML transformer shell script.<br>
* The shell script is used to execute the Libvirt VM XML transformer script.<br>
* For more information see the agent.properties file.<br>
* Data type: String.<br>
* Default value: <code>libvirt-vm-xml-transformer.sh</code>
*/
public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_XML_TRANSFORMER_SHELL_SCRIPT = new Property<>("agent.hooks.libvirt_vm_xml_transformer.shell_script", "libvirt-vm-xml-transformer.sh");
/** /**
* This property is used with the agent.hooks.basedir and agent.hooks.libvirt_vm_xml_transformer.script properties to define the Libvirt VM XML transformer method.<br> * This property is used with the agent.hooks.basedir and agent.hooks.libvirt_vm_xml_transformer.script properties to define the Libvirt VM XML transformer method.<br>
* Libvirt XML transformer hook does XML-to-XML transformation.<br> * Libvirt XML transformer hook does XML-to-XML transformation.<br>
@ -250,15 +233,6 @@ public class AgentProperties{
*/ */
public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_ON_START_SCRIPT = new Property<>("agent.hooks.libvirt_vm_on_start.script", "libvirt-vm-state-change.groovy"); public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_ON_START_SCRIPT = new Property<>("agent.hooks.libvirt_vm_on_start.script", "libvirt-vm-state-change.groovy");
/**
* This property is used with the agent.hooks.basedir property to define the Libvirt VM on start shell script.<br>
* The shell script is used to execute the Libvirt VM on start script.<br>
* For more information see the agent.properties file.<br>
* Data type: String.<br>
* Default value: <code>libvirt-vm-state-change.sh</code>
*/
public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_ON_START_SHELL_SCRIPT = new Property<>("agent.hooks.libvirt_vm_on_start.shell_script", "libvirt-vm-state-change.sh");
/** /**
* This property is used with the agent.hooks.basedir and agent.hooks.libvirt_vm_on_start.script properties to define the Libvirt VM on start method.<br> * This property is used with the agent.hooks.basedir and agent.hooks.libvirt_vm_on_start.script properties to define the Libvirt VM on start method.<br>
* The hook is called right after Libvirt successfully launched the VM.<br> * The hook is called right after Libvirt successfully launched the VM.<br>
@ -278,15 +252,6 @@ public class AgentProperties{
*/ */
public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_ON_STOP_SCRIPT = new Property<>("agent.hooks.libvirt_vm_on_stop.script", "libvirt-vm-state-change.groovy"); public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_ON_STOP_SCRIPT = new Property<>("agent.hooks.libvirt_vm_on_stop.script", "libvirt-vm-state-change.groovy");
/**
* This property is used with the agent.hooks.basedir property to define the Libvirt VM on stop shell script.<br>
* The shell script is used to execute the Libvirt VM on stop script.<br>
* For more information see the agent.properties file.<br>
* Data type: String.<br>
* Default value: <code>libvirt-vm-state-change.sh</code>
*/
public static final Property<String> AGENT_HOOKS_LIBVIRT_VM_ON_STOP_SHELL_SCRIPT = new Property<>("agent.hooks.libvirt_vm_on_stop.shell_script", "libvirt-vm-state-change.sh");
/** /**
* This property is used with the agent.hooks.basedir and agent.hooks.libvirt_vm_on_stop.script properties to define the Libvirt VM on stop method.<br> * This property is used with the agent.hooks.basedir and agent.hooks.libvirt_vm_on_stop.script properties to define the Libvirt VM on stop method.<br>
* The hook is called right after libvirt successfully stopped the VM.<br> * The hook is called right after libvirt successfully stopped the VM.<br>
@ -418,16 +383,15 @@ public class AgentProperties{
/** /**
* This param will set the CPU architecture for the domain to override what the management server would send.<br> * This param will set the CPU architecture for the domain to override what the management server would send.<br>
* In case of arm64 (aarch64), this will change the machine type to 'virt' and add a SCSI and a USB controller in the domain XML.<br> * In case of arm64 (aarch64), this will change the machine type to 'virt' and add a SCSI and a USB controller in the domain XML.<br>
* Possible values: x86_64 | aarch64 | s390x <br> * Possible values: x86_64 | aarch64 <br>
* Data type: String.<br> * Data type: String.<br>
* Default value: <code>null</code> (will set use the architecture of the VM's OS). * Default value: <code>null</code> (will set use the architecture of the VM's OS).
*/ */
public static final Property<String> GUEST_CPU_ARCH = new Property<>("guest.cpu.arch", null, String.class); public static final Property<String> GUEST_CPU_ARCH = new Property<>("guest.cpu.arch", null, String.class);
/** /**
* Specifies required CPU features for end-user and system VMs.<br> * This param will require CPU features on the CPU section.<br>
* These features must be present on the host CPU for VM deployment.<br> * The features listed in this property must be separated by a blank space (see example below).<br>
* Multiple features should be separated by whitespace (see example below).<br>
* Possible values: vmx vme <br> * Possible values: vmx vme <br>
* Data type: String.<br> * Data type: String.<br>
* Default value: <code>null</code> * Default value: <code>null</code>
@ -794,44 +758,6 @@ public class AgentProperties{
*/ */
public static final Property<Boolean> VIRTV2V_VERBOSE_ENABLED = new Property<>("virtv2v.verbose.enabled", false); public static final Property<Boolean> VIRTV2V_VERBOSE_ENABLED = new Property<>("virtv2v.verbose.enabled", false);
/**
* Set env TMPDIR var for virt-v2v Instance Conversion from VMware to KVM
* Data type: String.<br>
* Default value: <code>null</code>
*/
public static final Property<String> CONVERT_ENV_TMPDIR = new Property<>("convert.instance.env.tmpdir", null, String.class);
/**
* Set env VIRT_V2V_TMPDIR var for virt-v2v Instance Conversion from VMware to KVM
* Data type: String.<br>
* Default value: <code>null</code>
*/
public static final Property<String> CONVERT_ENV_VIRTV2V_TMPDIR = new Property<>("convert.instance.env.virtv2v.tmpdir", null, String.class);
/**
* Path to the VDDK library directory on the KVM conversion host, used when converting VMs from VMware to KVM via VDDK.
* This directory is passed to virt-v2v as <code>-io vddk-libdir=&lt;path&gt;</code>.
* Data type: String.<br>
* Default value: <code>null</code>
*/
public static final Property<String> VDDK_LIB_DIR = new Property<>("vddk.lib.dir", null, String.class);
/**
* Ordered list of VDDK transports for virt-v2v, passed as <code>-io vddk-transports=&lt;value&gt;</code>.
* Example: <code>nbd:nbdssl</code>.
* Data type: String.<br>
* Default value: <code>null</code>
*/
public static final Property<String> VDDK_TRANSPORTS = new Property<>("vddk.transports", null, String.class);
/**
* vCenter TLS certificate thumbprint used by virt-v2v VDDK mode, passed as <code>-io vddk-thumbprint=&lt;value&gt;</code>.
* If unset, the KVM host computes it at runtime from the vCenter endpoint.
* Data type: String.<br>
* Default value: <code>null</code>
*/
public static final Property<String> VDDK_THUMBPRINT = new Property<>("vddk.thumbprint", null, String.class);
/** /**
* BGP controll CIDR * BGP controll CIDR
* Data type: String.<br> * Data type: String.<br>
@ -890,30 +816,7 @@ public class AgentProperties{
* Data type: Integer.<br> * Data type: Integer.<br>
* Default value: <code>null</code> * Default value: <code>null</code>
*/ */
public static final Property<Integer> SSL_HANDSHAKE_TIMEOUT = new Property<>("ssl.handshake.timeout", 30, Integer.class); public static final Property<Integer> SSL_HANDSHAKE_TIMEOUT = new Property<>("ssl.handshake.timeout", null, Integer.class);
/**
* Timeout (in seconds) to wait for the incremental snapshot to complete.
* */
public static final Property<Integer> INCREMENTAL_SNAPSHOT_TIMEOUT = new Property<>("incremental.snapshot.timeout", 10800);
/**
* Timeout (in seconds) to wait for the snapshot reversion to complete.
* */
public static final Property<Integer> REVERT_SNAPSHOT_TIMEOUT = new Property<>("revert.snapshot.timeout", 10800);
/**
* If set to true, creates VMs as full clones of their templates on KVM hypervisor. Creates as linked clones otherwise. <br>
* Data type: Boolean. <br>
* Default value: <code>false</code>
*/
public static final Property<Boolean> CREATE_FULL_CLONE = new Property<>("create.full.clone", false);
/**
* Time, in seconds, to wait before retrying to rebase during the incremental snapshot process.
* */
public static final Property<Integer> INCREMENTAL_SNAPSHOT_RETRY_REBASE_WAIT = new Property<>("incremental.snapshot.retry.rebase.wait", 60);
public static class Property <T>{ public static class Property <T>{
private String name; private String name;

View File

@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.UUID;
import com.cloud.agent.IAgentControl; import com.cloud.agent.IAgentControl;
import com.cloud.agent.api.Answer; import com.cloud.agent.api.Answer;
@ -39,7 +40,6 @@ import com.cloud.resource.ServerResource;
import com.cloud.storage.Storage; import com.cloud.storage.Storage;
import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.utils.StringUtils; import com.cloud.utils.StringUtils;
import com.cloud.utils.UuidUtils;
public class DummyResource implements ServerResource { public class DummyResource implements ServerResource {
String _name; String _name;
@ -133,7 +133,7 @@ public class DummyResource implements ServerResource {
String hostIp = getConfiguredProperty("private.ip.address", "127.0.0.1"); String hostIp = getConfiguredProperty("private.ip.address", "127.0.0.1");
String localStoragePath = getConfiguredProperty("local.storage.path", "/mnt"); String localStoragePath = getConfiguredProperty("local.storage.path", "/mnt");
String lh = hostIp + localStoragePath; String lh = hostIp + localStoragePath;
String uuid = UuidUtils.nameUUIDFromBytes(lh.getBytes(StringUtils.getPreferredCharset())).toString(); String uuid = UUID.nameUUIDFromBytes(lh.getBytes(StringUtils.getPreferredCharset())).toString();
String capacity = getConfiguredProperty("local.storage.capacity", "1000000000"); String capacity = getConfiguredProperty("local.storage.capacity", "1000000000");
String available = getConfiguredProperty("local.storage.avail", "10000000"); String available = getConfiguredProperty("local.storage.avail", "10000000");

View File

@ -397,8 +397,9 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
} }
public String authenticateConsoleAccess(String host, String port, String vmId, String sid, String ticket, public String authenticateConsoleAccess(String host, String port, String vmId, String sid, String ticket,
Boolean isReauthentication, String sessionToken, String clientAddress) { Boolean isReauthentication, String sessionToken) {
ConsoleAccessAuthenticationCommand cmd = new ConsoleAccessAuthenticationCommand(host, port, vmId, sid, ticket, sessionToken, clientAddress);
ConsoleAccessAuthenticationCommand cmd = new ConsoleAccessAuthenticationCommand(host, port, vmId, sid, ticket, sessionToken);
cmd.setReauthenticating(isReauthentication); cmd.setReauthenticating(isReauthentication);
ConsoleProxyAuthenticationResult result = new ConsoleProxyAuthenticationResult(); ConsoleProxyAuthenticationResult result = new ConsoleProxyAuthenticationResult();

View File

@ -358,7 +358,7 @@ public class AgentShellTest {
AgentShell shell = new AgentShell(); AgentShell shell = new AgentShell();
shell.setHosts("test"); shell.setHosts("test");
shell.getNextHost(); shell.getNextHost();
shell.updateConnectedHost("test"); shell.updateConnectedHost();
Assert.assertEquals(expected, shell.getConnectedHost()); Assert.assertEquals(expected, shell.getConnectedHost());
} }

View File

@ -24,7 +24,7 @@
<parent> <parent>
<groupId>org.apache.cloudstack</groupId> <groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack</artifactId> <artifactId>cloudstack</artifactId>
<version>4.23.0.0-SNAPSHOT</version> <version>4.20.3.0</version>
</parent> </parent>
<dependencies> <dependencies>
<dependency> <dependency>

View File

@ -19,10 +19,9 @@ package com.cloud.agent.api;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.cloud.agent.api.LogLevel.Log4jLevel; import com.cloud.agent.api.LogLevel.Log4jLevel;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
/** /**
* implemented by classes that extends the Command class. Command specifies * implemented by classes that extends the Command class. Command specifies
@ -36,23 +35,6 @@ public abstract class Command {
Continue, Stop Continue, Stop
} }
public enum State {
CREATED, // Command is created by management server
STARTED, // Command is started by agent
PROCESSING, // Processing by agent
PROCESSING_IN_BACKEND, // Processing in backend by agent
COMPLETED, // Operation succeeds by agent or management server
FAILED, // Operation fails by agent
RECONCILE_RETRY, // Ready for retry of reconciliation
RECONCILING, // Being reconciled by management server
RECONCILED, // Reconciled by management server
RECONCILE_SKIPPED, // Skip the reconciliation as the resource state is inconsistent with the command
RECONCILE_FAILED, // Fail to reconcile by management server
TIMED_OUT, // Timed out on management server or agent
INTERRUPTED, // Interrupted by management server or agent (for example agent is restarted),
DANGLED_IN_BACKEND // Backend process which cannot be processed normally (for example agent is restarted)
}
public static final String HYPERVISOR_TYPE = "hypervisorType"; public static final String HYPERVISOR_TYPE = "hypervisorType";
// allow command to carry over hypervisor or other environment related context info // allow command to carry over hypervisor or other environment related context info
@ -60,8 +42,6 @@ public abstract class Command {
protected Map<String, String> contextMap = new HashMap<String, String>(); protected Map<String, String> contextMap = new HashMap<String, String>();
private int wait; //in second private int wait; //in second
private boolean bypassHostMaintenance = false; private boolean bypassHostMaintenance = false;
private transient long requestSequence = 0L;
protected Map<String, Map<String, String>> externalDetails;
protected Command() { protected Command() {
this.wait = 0; this.wait = 0;
@ -102,10 +82,6 @@ public abstract class Command {
return contextMap.get(name); return contextMap.get(name);
} }
public Map<String, String> getContextMap() {
return contextMap;
}
public boolean allowCaching() { public boolean allowCaching() {
return true; return true;
} }
@ -118,26 +94,6 @@ public abstract class Command {
this.bypassHostMaintenance = bypassHostMaintenance; this.bypassHostMaintenance = bypassHostMaintenance;
} }
public boolean isReconcile() {
return false;
}
public long getRequestSequence() {
return requestSequence;
}
public void setRequestSequence(long requestSequence) {
this.requestSequence = requestSequence;
}
public void setExternalDetails(Map<String, Map<String, String>> externalDetails) {
this.externalDetails = externalDetails;
}
public Map<String, Map<String, String>> getExternalDetails() {
return externalDetails;
}
@Override @Override
public boolean equals(Object o) { public boolean equals(Object o) {
if (this == o) return true; if (this == o) return true;

View File

@ -15,24 +15,10 @@
// specific language governing permissions and limitations // specific language governing permissions and limitations
// under the License. // under the License.
package com.cloud.agent.api; package com.cloud.agent.api;
import org.apache.cloudstack.gpu.GpuDevice;
public class VgpuTypesInfo { public class VgpuTypesInfo {
private boolean passthroughEnabled = true;
private GpuDevice.DeviceType deviceType;
private String parentBusAddress;
private String busAddress;
private String numaNode;
private String pciRoot;
private String deviceId;
private String deviceName;
private String vendorId;
private String vendorName;
private String modelName; private String modelName;
private String groupName; private String groupName;
private String vmName;
private Long maxHeads; private Long maxHeads;
private Long videoRam; private Long videoRam;
private Long maxResolutionX; private Long maxResolutionX;
@ -40,7 +26,6 @@ public class VgpuTypesInfo {
private Long maxVgpuPerGpu; private Long maxVgpuPerGpu;
private Long remainingCapacity; private Long remainingCapacity;
private Long maxCapacity; private Long maxCapacity;
private boolean display = false;
public String getModelName() { public String getModelName() {
return modelName; return modelName;
@ -54,42 +39,22 @@ public class VgpuTypesInfo {
return videoRam; return videoRam;
} }
public void setVideoRam(Long videoRam) {
this.videoRam = videoRam;
}
public Long getMaxHeads() { public Long getMaxHeads() {
return maxHeads; return maxHeads;
} }
public void setMaxHeads(Long maxHeads) {
this.maxHeads = maxHeads;
}
public Long getMaxResolutionX() { public Long getMaxResolutionX() {
return maxResolutionX; return maxResolutionX;
} }
public void setMaxResolutionX(Long maxResolutionX) {
this.maxResolutionX = maxResolutionX;
}
public Long getMaxResolutionY() { public Long getMaxResolutionY() {
return maxResolutionY; return maxResolutionY;
} }
public void setMaxResolutionY(Long maxResolutionY) {
this.maxResolutionY = maxResolutionY;
}
public Long getMaxVpuPerGpu() { public Long getMaxVpuPerGpu() {
return maxVgpuPerGpu; return maxVgpuPerGpu;
} }
public void setMaxVgpuPerGpu(Long maxVgpuPerGpu) {
this.maxVgpuPerGpu = maxVgpuPerGpu;
}
public Long getRemainingCapacity() { public Long getRemainingCapacity() {
return remainingCapacity; return remainingCapacity;
} }
@ -106,133 +71,8 @@ public class VgpuTypesInfo {
this.maxCapacity = maxCapacity; this.maxCapacity = maxCapacity;
} }
public boolean isPassthroughEnabled() { public VgpuTypesInfo(String groupName, String modelName, Long videoRam, Long maxHeads, Long maxResolutionX, Long maxResolutionY, Long maxVgpuPerGpu,
return passthroughEnabled; Long remainingCapacity, Long maxCapacity) {
}
public void setPassthroughEnabled(boolean passthroughEnabled) {
this.passthroughEnabled = passthroughEnabled;
}
public GpuDevice.DeviceType getDeviceType() {
return deviceType;
}
public void setDeviceType(GpuDevice.DeviceType deviceType) {
this.deviceType = deviceType;
}
public String getParentBusAddress() {
return parentBusAddress;
}
public void setParentBusAddress(String parentBusAddress) {
this.parentBusAddress = parentBusAddress;
}
public String getBusAddress() {
return busAddress;
}
public void setBusAddress(String busAddress) {
this.busAddress = busAddress;
}
public String getNumaNode() {
return numaNode;
}
public void setNumaNode(String numaNode) {
this.numaNode = numaNode;
}
public String getPciRoot() {
return pciRoot;
}
public void setPciRoot(String pciRoot) {
this.pciRoot = pciRoot;
}
public String getDeviceId() {
return deviceId;
}
public void setDeviceId(String deviceId) {
this.deviceId = deviceId;
}
public String getDeviceName() {
return deviceName;
}
public void setDeviceName(String deviceName) {
this.deviceName = deviceName;
}
public String getVendorId() {
return vendorId;
}
public void setVendorId(String vendorId) {
this.vendorId = vendorId;
}
public String getVendorName() {
return vendorName;
}
public void setVendorName(String vendorName) {
this.vendorName = vendorName;
}
public String getVmName() {
return vmName;
}
public void setVmName(String vmName) {
this.vmName = vmName;
}
public boolean isDisplay() {
return display;
}
public void setDisplay(boolean display) {
this.display = display;
}
public VgpuTypesInfo(GpuDevice.DeviceType deviceType, String groupName, String modelName, String busAddress,
String vendorId, String vendorName, String deviceId, String deviceName, String numaNode, String pciRoot
) {
this.deviceType = deviceType;
this.groupName = groupName;
this.modelName = modelName;
this.busAddress = busAddress;
this.deviceId = deviceId;
this.deviceName = deviceName;
this.vendorId = vendorId;
this.vendorName = vendorName;
this.numaNode = numaNode;
this.pciRoot = pciRoot;
}
public VgpuTypesInfo(GpuDevice.DeviceType deviceType, String groupName, String modelName, String busAddress,
String vendorId, String vendorName, String deviceId, String deviceName
) {
this.deviceType = deviceType;
this.groupName = groupName;
this.modelName = modelName;
this.busAddress = busAddress;
this.deviceId = deviceId;
this.deviceName = deviceName;
this.vendorId = vendorId;
this.vendorName = vendorName;
}
public VgpuTypesInfo(String groupName, String modelName, Long videoRam, Long maxHeads, Long maxResolutionX,
Long maxResolutionY, Long maxVgpuPerGpu, Long remainingCapacity, Long maxCapacity
) {
this.groupName = groupName; this.groupName = groupName;
this.modelName = modelName; this.modelName = modelName;
this.videoRam = videoRam; this.videoRam = videoRam;

View File

@ -119,7 +119,8 @@ public class OVFHelper {
boolean password = StringUtils.isNotBlank(passStr) && passStr.equalsIgnoreCase("true"); boolean password = StringUtils.isNotBlank(passStr) && passStr.equalsIgnoreCase("true");
String label = ovfParser.getChildNodeValue(node, "Label"); String label = ovfParser.getChildNodeValue(node, "Label");
String description = ovfParser.getChildNodeValue(node, "Description"); String description = ovfParser.getChildNodeValue(node, "Description");
logger.debug("Creating OVF property index {} {} with key = {}", index, (category == null ? "" : " for category " + category), key); logger.debug("Creating OVF property index " + index + (category == null ? "" : " for category " + category)
+ " with key = " + key);
return new OVFPropertyTO(key, type, value, qualifiers, userConfigurable, return new OVFPropertyTO(key, type, value, qualifiers, userConfigurable,
label, description, password, index, category); label, description, password, index, category);
} }
@ -151,7 +152,7 @@ public class OVFHelper {
if (child.getNodeName().equalsIgnoreCase("Category") || if (child.getNodeName().equalsIgnoreCase("Category") ||
child.getNodeName().endsWith(":Category")) { child.getNodeName().endsWith(":Category")) {
lastCategoryFound = child.getTextContent(); lastCategoryFound = child.getTextContent();
logger.info("Category found {}", lastCategoryFound); logger.info("Category found " + lastCategoryFound);
} else if (child.getNodeName().equalsIgnoreCase("Property") || } else if (child.getNodeName().equalsIgnoreCase("Property") ||
child.getNodeName().endsWith(":Property")) { child.getNodeName().endsWith(":Property")) {
OVFPropertyTO prop = createOVFPropertyFromNode(child, propertyIndex, lastCategoryFound); OVFPropertyTO prop = createOVFPropertyFromNode(child, propertyIndex, lastCategoryFound);
@ -249,13 +250,13 @@ public class OVFHelper {
int diskNumber = 0; int diskNumber = 0;
for (OVFVirtualHardwareItemTO diskItem : diskHardwareItems) { for (OVFVirtualHardwareItemTO diskItem : diskHardwareItems) {
if (StringUtils.isBlank(diskItem.getHostResource())) { if (StringUtils.isBlank(diskItem.getHostResource())) {
logger.error("Missing disk information for hardware item {} {}", diskItem.getElementName(), diskItem.getInstanceId()); logger.error("Missing disk information for hardware item " + diskItem.getElementName() + " " + diskItem.getInstanceId());
continue; continue;
} }
String diskId = extractDiskIdFromDiskHostResource(diskItem.getHostResource()); String diskId = extractDiskIdFromDiskHostResource(diskItem.getHostResource());
OVFDisk diskDefinition = getDiskDefinitionFromDiskId(diskId, disks); OVFDisk diskDefinition = getDiskDefinitionFromDiskId(diskId, disks);
if (diskDefinition == null) { if (diskDefinition == null) {
logger.error("Missing disk definition for disk ID {}", diskId); logger.error("Missing disk definition for disk ID " + diskId);
} }
OVFFile fileDefinition = getFileDefinitionFromDiskDefinition(diskDefinition._fileRef, files); OVFFile fileDefinition = getFileDefinitionFromDiskDefinition(diskDefinition._fileRef, files);
DatadiskTO datadiskTO = generateDiskTO(fileDefinition, diskDefinition, ovfParentPath, diskNumber, diskItem); DatadiskTO datadiskTO = generateDiskTO(fileDefinition, diskDefinition, ovfParentPath, diskNumber, diskItem);
@ -277,8 +278,8 @@ public class OVFHelper {
if (StringUtils.isNotBlank(path)) { if (StringUtils.isNotBlank(path)) {
File f = new File(path); File f = new File(path);
if (!f.exists() || f.isDirectory()) { if (!f.exists() || f.isDirectory()) {
logger.error("One of the attached disk or ISOs does not exists {}", path); logger.error("One of the attached disk or ISO does not exists " + path);
throw new InternalErrorException("One of the attached disk or ISOs as stated on OVF does not exists " + path); throw new InternalErrorException("One of the attached disk or ISO as stated on OVF does not exists " + path);
} }
} }
Long capacity = disk != null ? disk._capacity : file._size; Long capacity = disk != null ? disk._capacity : file._size;
@ -333,7 +334,9 @@ public class OVFHelper {
od._controller = getControllerType(items, od._diskId); od._controller = getControllerType(items, od._diskId);
vd.add(od); vd.add(od);
} }
logger.trace("Found {} disk definitions", vd.size()); if (logger.isTraceEnabled()) {
logger.trace(String.format("Found %d disk definitions", vd.size()));
}
return vd; return vd;
} }
@ -363,7 +366,9 @@ public class OVFHelper {
vf.add(of); vf.add(of);
} }
} }
logger.trace("Found {} file definitions in {}", vf.size(), ovfFile.getPath()); if (logger.isTraceEnabled()) {
logger.trace(String.format("Found %d file definitions in %s", vf.size(), ovfFile.getPath()));
}
return vf; return vf;
} }
@ -501,7 +506,7 @@ public class OVFHelper {
outfile.write(writer.toString()); outfile.write(writer.toString());
outfile.close(); outfile.close();
} catch (IOException | TransformerException e) { } catch (IOException | TransformerException e) {
logger.info("Unexpected exception caught while rewriting OVF: {}", e.getMessage(), e); logger.info("Unexpected exception caught while rewriting OVF:" + e.getMessage(), e);
throw new CloudRuntimeException(e); throw new CloudRuntimeException(e);
} }
} }
@ -517,7 +522,9 @@ public class OVFHelper {
public List<OVFNetworkTO> getNetPrerequisitesFromDocument(Document doc) throws InternalErrorException { public List<OVFNetworkTO> getNetPrerequisitesFromDocument(Document doc) throws InternalErrorException {
if (doc == null) { if (doc == null) {
logger.trace("No document to parse; returning no prerequisite networks"); if (logger.isTraceEnabled()) {
logger.trace("No document to parse; returning no prerequisite Networks");
}
return Collections.emptyList(); return Collections.emptyList();
} }
@ -533,7 +540,9 @@ public class OVFHelper {
private void matchNicsToNets(Map<String, OVFNetworkTO> nets, Node systemElement) { private void matchNicsToNets(Map<String, OVFNetworkTO> nets, Node systemElement) {
final DocumentTraversal traversal = (DocumentTraversal) systemElement; final DocumentTraversal traversal = (DocumentTraversal) systemElement;
final NodeIterator iterator = traversal.createNodeIterator(systemElement, NodeFilter.SHOW_ELEMENT, null, true); final NodeIterator iterator = traversal.createNodeIterator(systemElement, NodeFilter.SHOW_ELEMENT, null, true);
logger.trace("Starting out with {} network-prerequisites, parsing hardware", nets.size()); if (logger.isTraceEnabled()) {
logger.trace(String.format("Starting out with %d network-prerequisites, parsing hardware",nets.size()));
}
int nicCount = 0; int nicCount = 0;
for (Node n = iterator.nextNode(); n != null; n = iterator.nextNode()) { for (Node n = iterator.nextNode(); n != null; n = iterator.nextNode()) {
final Element e = (Element) n; final Element e = (Element) n;
@ -541,7 +550,9 @@ public class OVFHelper {
nicCount++; nicCount++;
String name = e.getTextContent(); // should be in our nets String name = e.getTextContent(); // should be in our nets
if(nets.get(name) == null) { if(nets.get(name) == null) {
logger.info("Found a NIC definition without a Network definition by name {}, adding it to the list.", name); if(logger.isInfoEnabled()) {
logger.info(String.format("Found a NIC definition without a Network definition by name %s, adding it to the list.", name));
}
nets.put(name, new OVFNetworkTO()); nets.put(name, new OVFNetworkTO());
} }
OVFNetworkTO thisNet = nets.get(name); OVFNetworkTO thisNet = nets.get(name);
@ -550,7 +561,9 @@ public class OVFHelper {
} }
} }
} }
logger.trace("Ending up with {} network-prerequisites, parsed {} nics", nets.size(), nicCount); if (logger.isTraceEnabled()) {
logger.trace(String.format("Ending up with %d network-prerequisites, parsed %d NICs", nets.size(), nicCount));
}
} }
/** /**
@ -572,7 +585,7 @@ public class OVFHelper {
int addressOnParent = Integer.parseInt(addressOnParentStr); int addressOnParent = Integer.parseInt(addressOnParentStr);
nic.setAddressOnParent(addressOnParent); nic.setAddressOnParent(addressOnParent);
} catch (NumberFormatException e) { } catch (NumberFormatException e) {
logger.warn("Encountered element of type \"AddressOnParent\", that could not be parse to an integer number: {}", addressOnParentStr); logger.warn("Encountered element of type \"AddressOnParent\", that could not be parse to an integer number: " + addressOnParentStr);
} }
boolean automaticAllocation = StringUtils.isNotBlank(automaticAllocationStr) && Boolean.parseBoolean(automaticAllocationStr); boolean automaticAllocation = StringUtils.isNotBlank(automaticAllocationStr) && Boolean.parseBoolean(automaticAllocationStr);
@ -584,7 +597,7 @@ public class OVFHelper {
int instanceId = Integer.parseInt(instanceIdStr); int instanceId = Integer.parseInt(instanceIdStr);
nic.setInstanceID(instanceId); nic.setInstanceID(instanceId);
} catch (NumberFormatException e) { } catch (NumberFormatException e) {
logger.warn("Encountered element of type \"InstanceID\", that could not be parse to an integer number: {}", instanceIdStr); logger.warn("Encountered element of type \"InstanceID\", that could not be parse to an integer number: " + instanceIdStr);
} }
nic.setResourceSubType(resourceSubType); nic.setResourceSubType(resourceSubType);
@ -617,7 +630,9 @@ public class OVFHelper {
nets.put(networkName,network); nets.put(networkName,network);
} }
logger.trace("Found {} Networks in Template", nets.size()); if (logger.isTraceEnabled()) {
logger.trace(String.format("Found %d Networks in Template", nets.size()));
}
return nets; return nets;
} }
@ -756,7 +771,7 @@ public class OVFHelper {
try { try {
return Long.parseLong(value); return Long.parseLong(value);
} catch (NumberFormatException e) { } catch (NumberFormatException e) {
logger.debug("Could not parse the value: {}, ignoring it", value); logger.debug("Could not parse the value: " + value + ", ignoring it");
} }
} }
return null; return null;
@ -767,7 +782,7 @@ public class OVFHelper {
try { try {
return Integer.parseInt(value); return Integer.parseInt(value);
} catch (NumberFormatException e) { } catch (NumberFormatException e) {
logger.debug("Could not parse the value: {}, ignoring it", value); logger.debug("Could not parse the value: " + value + ", ignoring it");
} }
} }
return null; return null;
@ -805,7 +820,7 @@ public class OVFHelper {
try { try {
compressedLicense = compressOVFEula(eulaLicense); compressedLicense = compressOVFEula(eulaLicense);
} catch (IOException e) { } catch (IOException e) {
logger.error("Could not compress the license for info {}", eulaInfo); logger.error("Could not compress the license for info " + eulaInfo);
continue; continue;
} }
OVFEulaSectionTO eula = new OVFEulaSectionTO(eulaInfo, compressedLicense, eulaIndex); OVFEulaSectionTO eula = new OVFEulaSectionTO(eulaInfo, compressedLicense, eulaIndex);

View File

@ -54,7 +54,7 @@ public class OVFParser {
documentBuilderFactory.setNamespaceAware(true); documentBuilderFactory.setNamespaceAware(true);
documentBuilder = documentBuilderFactory.newDocumentBuilder(); documentBuilder = documentBuilderFactory.newDocumentBuilder();
} catch (ParserConfigurationException e) { } catch (ParserConfigurationException e) {
logger.error("Cannot start the OVF parser: {}", e.getMessage(), e); logger.error("Cannot start the OVF parser: " + e.getMessage(), e);
} }
} }
@ -70,7 +70,7 @@ public class OVFParser {
try { try {
return documentBuilder.parse(new File(ovfFilePath)); return documentBuilder.parse(new File(ovfFilePath));
} catch (SAXException | IOException e) { } catch (SAXException | IOException e) {
logger.error("Error parsing {} {}", ovfFilePath, e.getMessage(), e); logger.error("Error parsing " + ovfFilePath + " " + e.getMessage(), e);
return null; return null;
} }
} }

View File

@ -46,7 +46,7 @@ public class DiskTO {
private Long diskSeq; private Long diskSeq;
private String path; private String path;
private Volume.Type type; private Volume.Type type;
private Map<String, String> details; private Map<String, String> _details;
public DiskTO() { public DiskTO() {
@ -92,10 +92,10 @@ public class DiskTO {
} }
public void setDetails(Map<String, String> details) { public void setDetails(Map<String, String> details) {
this.details = details; _details = details;
} }
public Map<String, String> getDetails() { public Map<String, String> getDetails() {
return details; return _details;
} }
} }

View File

@ -47,7 +47,7 @@ public class FirewallRuleTO implements InternalIdentity {
int[] srcPortRange; int[] srcPortRange;
boolean revoked; boolean revoked;
boolean alreadyAdded; boolean alreadyAdded;
protected List<String> sourceCidrList; private List<String> sourceCidrList;
private List<String> destCidrList; private List<String> destCidrList;
FirewallRule.Purpose purpose; FirewallRule.Purpose purpose;
private Integer icmpType; private Integer icmpType;

View File

@ -16,9 +16,7 @@
// under the License. // under the License.
package com.cloud.agent.api.to; package com.cloud.agent.api.to;
import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List;
import com.cloud.agent.api.VgpuTypesInfo; import com.cloud.agent.api.VgpuTypesInfo;
@ -26,23 +24,9 @@ public class GPUDeviceTO {
private String gpuGroup; private String gpuGroup;
private String vgpuType; private String vgpuType;
private int gpuCount;
private HashMap<String, HashMap<String, VgpuTypesInfo>> groupDetails = new HashMap<String, HashMap<String, VgpuTypesInfo>>(); private HashMap<String, HashMap<String, VgpuTypesInfo>> groupDetails = new HashMap<String, HashMap<String, VgpuTypesInfo>>();
private List<VgpuTypesInfo> gpuDevices = new ArrayList<>();
public GPUDeviceTO(String gpuGroup, String vgpuType, int gpuCount, public GPUDeviceTO( String gpuGroup, String vgpuType, HashMap<String, HashMap<String, VgpuTypesInfo>> groupDetails) {
HashMap<String, HashMap<String, VgpuTypesInfo>> groupDetails,
List<VgpuTypesInfo> gpuDevices) {
this.gpuGroup = gpuGroup;
this.vgpuType = vgpuType;
this.groupDetails = groupDetails;
this.gpuCount = gpuCount;
this.gpuDevices = gpuDevices;
}
public GPUDeviceTO(String gpuGroup, String vgpuType,
HashMap<String, HashMap<String, VgpuTypesInfo>> groupDetails) {
this.gpuGroup = gpuGroup; this.gpuGroup = gpuGroup;
this.vgpuType = vgpuType; this.vgpuType = vgpuType;
this.groupDetails = groupDetails; this.groupDetails = groupDetails;
@ -64,14 +48,6 @@ public class GPUDeviceTO {
this.vgpuType = vgpuType; this.vgpuType = vgpuType;
} }
public int getGpuCount() {
return gpuCount;
}
public void setGpuCount(int gpuCount) {
this.gpuCount = gpuCount;
}
public HashMap<String, HashMap<String, VgpuTypesInfo>> getGroupDetails() { public HashMap<String, HashMap<String, VgpuTypesInfo>> getGroupDetails() {
return groupDetails; return groupDetails;
} }
@ -80,11 +56,4 @@ public class GPUDeviceTO {
this.groupDetails = groupDetails; this.groupDetails = groupDetails;
} }
public List<VgpuTypesInfo> getGpuDevices() {
return gpuDevices;
}
public void setGpuDevices(List<VgpuTypesInfo> gpuDevices) {
this.gpuDevices = gpuDevices;
}
} }

View File

@ -71,7 +71,7 @@ public class LoadBalancerTO {
this.destinations = new DestinationTO[destinations.size()]; this.destinations = new DestinationTO[destinations.size()];
this.stickinessPolicies = null; this.stickinessPolicies = null;
this.sslCert = null; this.sslCert = null;
this.lbProtocol = protocol; this.lbProtocol = null;
int i = 0; int i = 0;
for (LbDestination destination : destinations) { for (LbDestination destination : destinations) {
this.destinations[i++] = new DestinationTO(destination.getIpAddress(), destination.getDestinationPortStart(), destination.isRevoked(), false); this.destinations[i++] = new DestinationTO(destination.getIpAddress(), destination.getDestinationPortStart(), destination.isRevoked(), false);
@ -205,10 +205,6 @@ public class LoadBalancerTO {
return this.sslCert; return this.sslCert;
} }
public void setLbSslCert(LbSslCert sslCert) {
this.sslCert = sslCert;
}
public String getSrcIpVlan() { public String getSrcIpVlan() {
return srcIpVlan; return srcIpVlan;
} }

View File

@ -36,7 +36,7 @@ public class NetworkTO {
protected TrafficType type; protected TrafficType type;
protected URI broadcastUri; protected URI broadcastUri;
protected URI isolationUri; protected URI isolationUri;
protected boolean securityGroupEnabled; protected boolean isSecurityGroupEnabled;
protected String name; protected String name;
protected String ip6address; protected String ip6address;
protected String ip6gateway; protected String ip6gateway;
@ -112,7 +112,7 @@ public class NetworkTO {
} }
public void setSecurityGroupEnabled(boolean enabled) { public void setSecurityGroupEnabled(boolean enabled) {
this.securityGroupEnabled = enabled; this.isSecurityGroupEnabled = enabled;
} }
/** /**
@ -221,7 +221,7 @@ public class NetworkTO {
} }
public boolean isSecurityGroupEnabled() { public boolean isSecurityGroupEnabled() {
return this.securityGroupEnabled; return this.isSecurityGroupEnabled;
} }
public void setIp6Dns1(String ip6Dns1) { public void setIp6Dns1(String ip6Dns1) {

View File

@ -33,7 +33,6 @@ public class NicTO extends NetworkTO {
boolean dpdkEnabled; boolean dpdkEnabled;
Integer mtu; Integer mtu;
Long networkId; Long networkId;
boolean enabled;
String networkSegmentName; String networkSegmentName;
@ -87,14 +86,6 @@ public class NicTO extends NetworkTO {
this.nicUuid = uuid; this.nicUuid = uuid;
} }
public String getNicUuid() {
return nicUuid;
}
public void setNicUuid(String nicUuid) {
this.nicUuid = nicUuid;
}
@Override @Override
public String toString() { public String toString() {
return new StringBuilder("[Nic:").append(type).append("-").append(ip).append("-").append(broadcastUri).append("]").toString(); return new StringBuilder("[Nic:").append(type).append("-").append(ip).append("-").append(broadcastUri).append("]").toString();
@ -155,12 +146,4 @@ public class NicTO extends NetworkTO {
public void setNetworkSegmentName(String networkSegmentName) { public void setNetworkSegmentName(String networkSegmentName) {
this.networkSegmentName = networkSegmentName; this.networkSegmentName = networkSegmentName;
} }
public boolean isEnabled() {
return enabled;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
} }

View File

@ -21,6 +21,8 @@ import com.cloud.network.rules.PortForwardingRule;
import com.cloud.utils.net.NetUtils; import com.cloud.utils.net.NetUtils;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import java.util.List;
/** /**
* PortForwardingRuleTO specifies one port forwarding rule. * PortForwardingRuleTO specifies one port forwarding rule.
* *
@ -30,6 +32,8 @@ public class PortForwardingRuleTO extends FirewallRuleTO {
String dstIp; String dstIp;
int[] dstPortRange; int[] dstPortRange;
List<String> sourceCidrList;
protected PortForwardingRuleTO() { protected PortForwardingRuleTO() {
super(); super();
} }

View File

@ -36,17 +36,13 @@ public class RemoteInstanceTO implements Serializable {
private String vcenterPassword; private String vcenterPassword;
private String vcenterHost; private String vcenterHost;
private String datacenterName; private String datacenterName;
private String clusterName;
private String hostName;
public RemoteInstanceTO() { public RemoteInstanceTO() {
} }
public RemoteInstanceTO(String instanceName, String clusterName, String hostName) { public RemoteInstanceTO(String instanceName) {
this.hypervisorType = Hypervisor.HypervisorType.VMware; this.hypervisorType = Hypervisor.HypervisorType.VMware;
this.instanceName = instanceName; this.instanceName = instanceName;
this.clusterName = clusterName;
this.hostName = hostName;
} }
public RemoteInstanceTO(String instanceName, String instancePath, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) { public RemoteInstanceTO(String instanceName, String instancePath, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) {
@ -59,12 +55,6 @@ public class RemoteInstanceTO implements Serializable {
this.datacenterName = datacenterName; this.datacenterName = datacenterName;
} }
public RemoteInstanceTO(String instanceName, String instancePath, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName, String clusterName, String hostName) {
this(instanceName, instancePath, vcenterHost, vcenterUsername, vcenterPassword, datacenterName);
this.clusterName = clusterName;
this.hostName = hostName;
}
public Hypervisor.HypervisorType getHypervisorType() { public Hypervisor.HypervisorType getHypervisorType() {
return this.hypervisorType; return this.hypervisorType;
} }
@ -92,12 +82,4 @@ public class RemoteInstanceTO implements Serializable {
public String getDatacenterName() { public String getDatacenterName() {
return datacenterName; return datacenterName;
} }
public String getClusterName() {
return clusterName;
}
public String getHostName() {
return hostName;
}
} }

View File

@ -1,182 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.api.to;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class VirtualMachineMetadataTO {
// VM details
private final String name;
private final String internalName;
private final String displayName;
private final String instanceUuid;
private final Integer cpuCores;
private final Integer memory;
private final Long created;
private final Long started;
// Owner details
private final String ownerDomainUuid;
private final String ownerDomainName;
private final String ownerAccountUuid;
private final String ownerAccountName;
private final String ownerProjectUuid;
private final String ownerProjectName;
// Host and service offering
private final String serviceOfferingName;
private final List<String> serviceOfferingHostTags;
// zone, pod, and cluster details
private final String zoneName;
private final String zoneUuid;
private final String podName;
private final String podUuid;
private final String clusterName;
private final String clusterUuid;
// resource tags
private final Map<String, String> resourceTags;
public VirtualMachineMetadataTO(
String name, String internalName, String displayName, String instanceUuid, Integer cpuCores, Integer memory, Long created, Long started,
String ownerDomainUuid, String ownerDomainName, String ownerAccountUuid, String ownerAccountName, String ownerProjectUuid, String ownerProjectName,
String serviceOfferingName, List<String> serviceOfferingHostTags,
String zoneName, String zoneUuid, String podName, String podUuid, String clusterName, String clusterUuid, Map<String, String> resourceTags) {
/*
* Something failed in the metadata shall not be a fatal error, the VM can still be started
* Thus, the unknown fields just get an explicit "unknown" value so it can be fixed in case
* there are bugs on some execution paths.
* */
this.name = (name != null) ? name : "unknown";
this.internalName = (internalName != null) ? internalName : "unknown";
this.displayName = (displayName != null) ? displayName : "unknown";
this.instanceUuid = (instanceUuid != null) ? instanceUuid : "unknown";
this.cpuCores = (cpuCores != null) ? cpuCores : -1;
this.memory = (memory != null) ? memory : -1;
this.created = (created != null) ? created : 0;
this.started = (started != null) ? started : 0;
this.ownerDomainUuid = (ownerDomainUuid != null) ? ownerDomainUuid : "unknown";
this.ownerDomainName = (ownerDomainName != null) ? ownerDomainName : "unknown";
this.ownerAccountUuid = (ownerAccountUuid != null) ? ownerAccountUuid : "unknown";
this.ownerAccountName = (ownerAccountName != null) ? ownerAccountName : "unknown";
this.ownerProjectUuid = (ownerProjectUuid != null) ? ownerProjectUuid : "unknown";
this.ownerProjectName = (ownerProjectName != null) ? ownerProjectName : "unknown";
this.serviceOfferingName = (serviceOfferingName != null) ? serviceOfferingName : "unknown";
this.serviceOfferingHostTags = (serviceOfferingHostTags != null) ? serviceOfferingHostTags : new ArrayList<>();
this.zoneName = (zoneName != null) ? zoneName : "unknown";
this.zoneUuid = (zoneUuid != null) ? zoneUuid : "unknown";
this.podName = (podName != null) ? podName : "unknown";
this.podUuid = (podUuid != null) ? podUuid : "unknown";
this.clusterName = (clusterName != null) ? clusterName : "unknown";
this.clusterUuid = (clusterUuid != null) ? clusterUuid : "unknown";
this.resourceTags = (resourceTags != null) ? resourceTags : new HashMap<>();
}
public String getName() {
return name;
}
public String getInternalName() {
return internalName;
}
public String getDisplayName() {
return displayName;
}
public String getInstanceUuid() {
return instanceUuid;
}
public Integer getCpuCores() {
return cpuCores;
}
public Integer getMemory() {
return memory;
}
public Long getCreated() { return created; }
public Long getStarted() {
return started;
}
public String getOwnerDomainUuid() {
return ownerDomainUuid;
}
public String getOwnerDomainName() {
return ownerDomainName;
}
public String getOwnerAccountUuid() {
return ownerAccountUuid;
}
public String getOwnerAccountName() {
return ownerAccountName;
}
public String getOwnerProjectUuid() {
return ownerProjectUuid;
}
public String getOwnerProjectName() {
return ownerProjectName;
}
public String getserviceOfferingName() {
return serviceOfferingName;
}
public List<String> getserviceOfferingHostTags() {
return serviceOfferingHostTags;
}
public String getZoneName() {
return zoneName;
}
public String getZoneUuid() {
return zoneUuid;
}
public String getPodName() {
return podName;
}
public String getPodUuid() {
return podUuid;
}
public String getClusterName() {
return clusterName;
}
public String getClusterUuid() {
return clusterUuid;
}
public Map<String, String> getResourceTags() { return resourceTags; }
}

View File

@ -19,22 +19,20 @@ package com.cloud.agent.api.to;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.HashMap; import java.util.HashMap;
import java.util.stream.Collectors;
import com.cloud.agent.api.LogLevel; import com.cloud.agent.api.LogLevel;
import com.cloud.network.element.NetworkElement; import com.cloud.network.element.NetworkElement;
import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.template.VirtualMachineTemplate.BootloaderType;
import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.VirtualMachine.Type;
import com.cloud.vm.VmDetailConstants;
public class VirtualMachineTO { public class VirtualMachineTO {
private long id; private long id;
private String name; private String name;
private BootloaderType bootloader; private BootloaderType bootloader;
private VirtualMachine.State state; private VirtualMachine.State state;
private Type type; Type type;
private int cpus; int cpus;
/** /**
'speed' is still here since 4.0.X/4.1.X management servers do not support 'speed' is still here since 4.0.X/4.1.X management servers do not support
@ -45,51 +43,49 @@ public class VirtualMachineTO {
So this is here for backwards compatibility with 4.0.X/4.1.X management servers So this is here for backwards compatibility with 4.0.X/4.1.X management servers
and newer agents. and newer agents.
*/ */
private Integer speed; Integer speed;
private Integer minSpeed; Integer minSpeed;
private Integer maxSpeed; Integer maxSpeed;
private long minRam; long minRam;
private long maxRam; long maxRam;
private String hostName; String hostName;
private String arch; String arch;
private String os; String os;
private String platformEmulator; String platformEmulator;
private String bootArgs; String bootArgs;
private String[] bootupScripts; String[] bootupScripts;
private boolean enableHA; boolean enableHA;
private boolean limitCpuUse; boolean limitCpuUse;
private boolean enableDynamicallyScaleVm; boolean enableDynamicallyScaleVm;
@LogLevel(LogLevel.Log4jLevel.Off) @LogLevel(LogLevel.Log4jLevel.Off)
private String vncPassword; String vncPassword;
private String vncAddr; String vncAddr;
private Map<String, String> details; Map<String, String> params;
private Map<String, String> params; String uuid;
private String uuid; String bootType;
private String bootType; String bootMode;
private String bootMode; boolean enterHardwareSetup;
private boolean enterHardwareSetup;
private DiskTO[] disks; DiskTO[] disks;
private NicTO[] nics; NicTO[] nics;
private GPUDeviceTO gpuDevice; GPUDeviceTO gpuDevice;
private Integer vcpuMaxLimit; Integer vcpuMaxLimit;
private List<String[]> vmData = null; List<String[]> vmData = null;
private String configDriveLabel = null; String configDriveLabel = null;
private String configDriveIsoRootFolder = null; String configDriveIsoRootFolder = null;
private String configDriveIsoFile = null; String configDriveIsoFile = null;
private NetworkElement.Location configDriveLocation = NetworkElement.Location.SECONDARY; NetworkElement.Location configDriveLocation = NetworkElement.Location.SECONDARY;
private Double cpuQuotaPercentage = null; Double cpuQuotaPercentage = null;
private Map<String, String> guestOsDetails = new HashMap<String, String>(); Map<String, String> guestOsDetails = new HashMap<String, String>();
private Map<String, String> extraConfig = new HashMap<>(); Map<String, String> extraConfig = new HashMap<>();
private Map<Long, String> networkIdToNetworkNameMap = new HashMap<>(); Map<Long, String> networkIdToNetworkNameMap = new HashMap<>();
private DeployAsIsInfoTO deployAsIsInfo; DeployAsIsInfoTO deployAsIsInfo;
private String metadataManufacturer; String metadataManufacturer;
private String metadataProductName; String metadataProductName;
private VirtualMachineMetadataTO metadata;
public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader,
String os, boolean enableHA, boolean limitCpuUse, String vncPassword) { String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
@ -195,11 +191,7 @@ public class VirtualMachineTO {
return maxSpeed; return maxSpeed;
} }
public boolean isEnableHA() { public boolean getLimitCpuUse() {
return enableHA;
}
public boolean isLimitCpuUse() {
return limitCpuUse; return limitCpuUse;
} }
@ -264,10 +256,6 @@ public class VirtualMachineTO {
this.bootupScripts = bootupScripts; this.bootupScripts = bootupScripts;
} }
public void setEnableHA(boolean enableHA) {
this.enableHA = enableHA;
}
public DiskTO[] getDisks() { public DiskTO[] getDisks() {
return disks; return disks;
} }
@ -301,11 +289,11 @@ public class VirtualMachineTO {
} }
public Map<String, String> getDetails() { public Map<String, String> getDetails() {
return details; return params;
} }
public void setDetails(Map<String, String> params) { public void setDetails(Map<String, String> params) {
this.details = params; this.params = params;
} }
public String getUuid() { public String getUuid() {
@ -443,42 +431,6 @@ public class VirtualMachineTO {
this.deployAsIsInfo = deployAsIsInfo; this.deployAsIsInfo = deployAsIsInfo;
} }
public void setSpeed(Integer speed) {
this.speed = speed;
}
public void setMinSpeed(Integer minSpeed) {
this.minSpeed = minSpeed;
}
public void setMaxSpeed(Integer maxSpeed) {
this.maxSpeed = maxSpeed;
}
public void setMinRam(long minRam) {
this.minRam = minRam;
}
public void setMaxRam(long maxRam) {
this.maxRam = maxRam;
}
public void setLimitCpuUse(boolean limitCpuUse) {
this.limitCpuUse = limitCpuUse;
}
public Map<String, String> getParams() {
return params;
}
public void setParams(Map<String, String> params) {
this.params = params;
}
public void setExtraConfig(Map<String, String> extraConfig) {
this.extraConfig = extraConfig;
}
public String getMetadataManufacturer() { public String getMetadataManufacturer() {
return metadataManufacturer; return metadataManufacturer;
} }
@ -495,28 +447,8 @@ public class VirtualMachineTO {
this.metadataProductName = metadataProductName; this.metadataProductName = metadataProductName;
} }
public VirtualMachineMetadataTO getMetadata() {
return metadata;
}
public void setMetadata(VirtualMachineMetadataTO metadata) {
this.metadata = metadata;
}
@Override @Override
public String toString() { public String toString() {
return String.format("VM {id: \"%s\", name: \"%s\", uuid: \"%s\", type: \"%s\"}", id, name, uuid, type); return String.format("VM {id: \"%s\", name: \"%s\", uuid: \"%s\", type: \"%s\"}", id, name, uuid, type);
} }
public Map<String, String> getExternalDetails() {
if (details == null) {
return new HashMap<>();
}
return details.entrySet().stream()
.filter(entry -> entry.getKey().startsWith(VmDetailConstants.EXTERNAL_DETAIL_PREFIX))
.collect(Collectors.toMap(
entry -> entry.getKey().substring(VmDetailConstants.EXTERNAL_DETAIL_PREFIX.length()),
Map.Entry::getValue
));
}
} }

View File

@ -22,11 +22,19 @@ import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.host.Host; import com.cloud.host.Host;
import com.cloud.host.Host.Type; import com.cloud.host.Host.Type;
import com.cloud.offering.ServiceOffering;
import com.cloud.utils.component.Adapter; import com.cloud.utils.component.Adapter;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.VirtualMachineProfile;
public interface HostAllocator extends Adapter { public interface HostAllocator extends Adapter {
/**
* @param UserVm vm
* @param ServiceOffering offering
**/
boolean isVirtualMachineUpgradable(final VirtualMachine vm, final ServiceOffering offering);
/** /**
* Determines which physical hosts are suitable to * Determines which physical hosts are suitable to
* allocate the guest virtual machines on * allocate the guest virtual machines on
@ -41,6 +49,31 @@ public interface HostAllocator extends Adapter {
public List<Host> allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo); public List<Host> allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo);
/**
* Determines which physical hosts are suitable to allocate the guest
* virtual machines on
*
* Allocators must set any other hosts not considered for allocation in the
* ExcludeList avoid. Thus the avoid set and the list of hosts suitable,
* together must cover the entire host set in the cluster.
*
* @param VirtualMachineProfile
* vmProfile
* @param DeploymentPlan
* plan
* @param GuestType
* type
* @param ExcludeList
* avoid
* @param int returnUpTo (use -1 to return all possible hosts)
* @param boolean considerReservedCapacity (default should be true, set to
* false if host capacity calculation should not look at reserved
* capacity)
* @return List<Host> List of hosts that are suitable for VM allocation
**/
public List<Host> allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity);
/** /**
* Determines which physical hosts are suitable to allocate the guest * Determines which physical hosts are suitable to allocate the guest
* virtual machines on * virtual machines on

View File

@ -34,17 +34,13 @@ public interface Capacity extends InternalIdentity, Identity {
public static final short CAPACITY_TYPE_LOCAL_STORAGE = 9; public static final short CAPACITY_TYPE_LOCAL_STORAGE = 9;
public static final short CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET = 10; public static final short CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET = 10;
public static final short CAPACITY_TYPE_GPU = 19; public static final short CAPACITY_TYPE_GPU = 19;
public static final short CAPACITY_TYPE_OBJECT_STORAGE = 20;
public static final short CAPACITY_TYPE_BACKUP_STORAGE = 21;
public static final short CAPACITY_TYPE_CPU_CORE = 90; public static final short CAPACITY_TYPE_CPU_CORE = 90;
public static final List<Short> STORAGE_CAPACITY_TYPES = List.of(CAPACITY_TYPE_STORAGE, public static final List<Short> STORAGE_CAPACITY_TYPES = List.of(CAPACITY_TYPE_STORAGE,
CAPACITY_TYPE_STORAGE_ALLOCATED, CAPACITY_TYPE_STORAGE_ALLOCATED,
CAPACITY_TYPE_SECONDARY_STORAGE, CAPACITY_TYPE_SECONDARY_STORAGE,
CAPACITY_TYPE_LOCAL_STORAGE, CAPACITY_TYPE_LOCAL_STORAGE);
CAPACITY_TYPE_BACKUP_STORAGE,
CAPACITY_TYPE_OBJECT_STORAGE);
public Long getHostOrPoolId(); public Long getHostOrPoolId();

View File

@ -17,25 +17,18 @@
package com.cloud.configuration; package com.cloud.configuration;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Objects;
import com.cloud.network.Network;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.command.admin.config.ResetCfgCmd; import org.apache.cloudstack.api.command.admin.config.ResetCfgCmd;
import org.apache.cloudstack.api.command.admin.config.UpdateCfgCmd; import org.apache.cloudstack.api.command.admin.config.UpdateCfgCmd;
import org.apache.cloudstack.api.command.admin.network.CloneNetworkOfferingCmd;
import org.apache.cloudstack.api.command.admin.network.CreateGuestNetworkIpv6PrefixCmd; import org.apache.cloudstack.api.command.admin.network.CreateGuestNetworkIpv6PrefixCmd;
import org.apache.cloudstack.api.command.admin.network.CreateManagementNetworkIpRangeCmd; import org.apache.cloudstack.api.command.admin.network.CreateManagementNetworkIpRangeCmd;
import org.apache.cloudstack.api.command.admin.network.CreateNetworkOfferingCmd;
import org.apache.cloudstack.api.command.admin.network.DeleteGuestNetworkIpv6PrefixCmd; import org.apache.cloudstack.api.command.admin.network.DeleteGuestNetworkIpv6PrefixCmd;
import org.apache.cloudstack.api.command.admin.network.DeleteManagementNetworkIpRangeCmd; import org.apache.cloudstack.api.command.admin.network.DeleteManagementNetworkIpRangeCmd;
import org.apache.cloudstack.api.command.admin.network.DeleteNetworkOfferingCmd; import org.apache.cloudstack.api.command.admin.network.DeleteNetworkOfferingCmd;
import org.apache.cloudstack.api.command.admin.network.ListGuestNetworkIpv6PrefixesCmd; import org.apache.cloudstack.api.command.admin.network.ListGuestNetworkIpv6PrefixesCmd;
import org.apache.cloudstack.api.command.admin.network.NetworkOfferingBaseCmd;
import org.apache.cloudstack.api.command.admin.network.UpdateNetworkOfferingCmd; import org.apache.cloudstack.api.command.admin.network.UpdateNetworkOfferingCmd;
import org.apache.cloudstack.api.command.admin.network.UpdatePodManagementNetworkIpRangeCmd; import org.apache.cloudstack.api.command.admin.network.UpdatePodManagementNetworkIpRangeCmd;
import org.apache.cloudstack.api.command.admin.offering.CloneDiskOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.CloneServiceOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.CreateDiskOfferingCmd; import org.apache.cloudstack.api.command.admin.offering.CreateDiskOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.CreateServiceOfferingCmd; import org.apache.cloudstack.api.command.admin.offering.CreateServiceOfferingCmd;
import org.apache.cloudstack.api.command.admin.offering.DeleteDiskOfferingCmd; import org.apache.cloudstack.api.command.admin.offering.DeleteDiskOfferingCmd;
@ -108,52 +101,39 @@ public interface ConfigurationService {
*/ */
ServiceOffering createServiceOffering(CreateServiceOfferingCmd cmd); ServiceOffering createServiceOffering(CreateServiceOfferingCmd cmd);
/**
* Clones a service offering with optional parameter overrides
*
* @param cmd
* the command object that specifies the source offering ID and optional parameter overrides
* @return the newly created service offering cloned from source, null otherwise
*/
ServiceOffering cloneServiceOffering(CloneServiceOfferingCmd cmd);
/**
* Clones a disk offering with optional parameter overrides
*
* @param cmd
* the command object that specifies the source offering ID and optional parameter overrides
* @return the newly created disk offering cloned from source, null otherwise
*/
DiskOffering cloneDiskOffering(CloneDiskOfferingCmd cmd);
/**
* Clones a network offering with optional parameter overrides
*
* @param cmd
* the command object that specifies the source offering ID and optional parameter overrides
* @return the newly created network offering cloned from source, null otherwise
*/
NetworkOffering cloneNetworkOffering(CloneNetworkOfferingCmd cmd);
/** /**
* Updates a service offering * Updates a service offering
* *
* @param serviceOfferingId
* @param userId
* @param name
* @param displayText
* @param offerHA
* @param useVirtualNetwork
* @param tags
* @return updated service offering * @return updated service offering
*/ */
ServiceOffering updateServiceOffering(UpdateServiceOfferingCmd cmd); ServiceOffering updateServiceOffering(UpdateServiceOfferingCmd cmd);
/** /**
* Deletes a service offering * Deletes a service offering
*
* @param userId
* @param serviceOfferingId
*/ */
boolean deleteServiceOffering(DeleteServiceOfferingCmd cmd); boolean deleteServiceOffering(DeleteServiceOfferingCmd cmd);
/** /**
* Retrieve ID of domains for a service offering * Retrieve ID of domains for a service offering
*
* @param serviceOfferingId
*/ */
List<Long> getServiceOfferingDomains(Long serviceOfferingId); List<Long> getServiceOfferingDomains(Long serviceOfferingId);
/** /**
* Retrieve ID of domains for a service offering * Retrieve ID of domains for a service offering
*
* @param serviceOfferingId
*/ */
List<Long> getServiceOfferingZones(Long serviceOfferingId); List<Long> getServiceOfferingZones(Long serviceOfferingId);
@ -163,6 +143,7 @@ public interface ConfigurationService {
* @param cmd * @param cmd
* - the command specifying diskOfferingId, name, description, tags * - the command specifying diskOfferingId, name, description, tags
* @return updated disk offering * @return updated disk offering
* @throws
*/ */
DiskOffering updateDiskOffering(UpdateDiskOfferingCmd cmd); DiskOffering updateDiskOffering(UpdateDiskOfferingCmd cmd);
@ -172,22 +153,34 @@ public interface ConfigurationService {
* @param cmd * @param cmd
* - the command specifying disk offering id * - the command specifying disk offering id
* @return true or false * @return true or false
* @throws
*/ */
boolean deleteDiskOffering(DeleteDiskOfferingCmd cmd); boolean deleteDiskOffering(DeleteDiskOfferingCmd cmd);
/** /**
* Creates a new disk offering * Creates a new disk offering
*
* @param domainId
* @param name
* @param description
* @param numGibibytes
* @param mirrored
* @param size
* @return ID * @return ID
*/ */
DiskOffering createDiskOffering(CreateDiskOfferingCmd cmd); DiskOffering createDiskOffering(CreateDiskOfferingCmd cmd);
/** /**
* Retrieve ID of domains for a disk offering * Retrieve ID of domains for a disk offering
*
* @param diskOfferingId
*/ */
List<Long> getDiskOfferingDomains(Long diskOfferingId); List<Long> getDiskOfferingDomains(Long diskOfferingId);
/** /**
* Retrieve ID of domains for a disk offering * Retrieve ID of domains for a disk offering
*
* @param diskOfferingId
*/ */
List<Long> getDiskOfferingZones(Long diskOfferingId); List<Long> getDiskOfferingZones(Long diskOfferingId);
@ -208,10 +201,11 @@ public interface ConfigurationService {
* TODO * TODO
* @param allocationState * @param allocationState
* TODO * TODO
* @param storageAccessGroups
* @return the new pod if successful, null otherwise * @return the new pod if successful, null otherwise
* @throws
* @throws
*/ */
Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState, List<String> storageAccessGroups); Pod createPod(long zoneId, String name, String startIp, String endIp, String gateway, String netmask, String allocationState);
/** /**
* Creates a mutual exclusive IP range in the pod with same gateway, netmask. * Creates a mutual exclusive IP range in the pod with same gateway, netmask.
@ -229,7 +223,8 @@ public interface ConfigurationService {
/** /**
* Updates a mutually exclusive IP range in the pod. * Updates a mutually exclusive IP range in the pod.
* @param cmd - The command specifying pod ID, current Start IP, current End IP, new Start IP, new End IP. * @param cmd - The command specifying pod ID, current Start IP, current End IP, new Start IP, new End IP.
* @throws com.cloud.exception.ConcurrentOperationException when this pod is already being accessed * @throws com.cloud.exception.ConcurrentOperationException
* @return Success
*/ */
void updatePodIpRange(UpdatePodManagementNetworkIpRangeCmd cmd) throws ConcurrentOperationException; void updatePodIpRange(UpdatePodManagementNetworkIpRangeCmd cmd) throws ConcurrentOperationException;
@ -250,6 +245,9 @@ public interface ConfigurationService {
/** /**
* Edits a pod in the database. Will not allow you to edit pods that are being used anywhere in the system. * Edits a pod in the database. Will not allow you to edit pods that are being used anywhere in the system.
*
* @param UpdatePodCmd
* api command
*/ */
Pod editPod(UpdatePodCmd cmd); Pod editPod(UpdatePodCmd cmd);
@ -259,12 +257,17 @@ public interface ConfigurationService {
* @param cmd * @param cmd
* - the command containing podId * - the command containing podId
* @return true or false * @return true or false
* @throws ,
*/ */
boolean deletePod(DeletePodCmd cmd); boolean deletePod(DeletePodCmd cmd);
/** /**
* Creates a new zone * Creates a new zone
*
* @param cmd
* @return the zone if successful, null otherwise * @return the zone if successful, null otherwise
* @throws
* @throws
*/ */
DataCenter createZone(CreateZoneCmd cmd); DataCenter createZone(CreateZoneCmd cmd);
@ -287,7 +290,22 @@ public interface ConfigurationService {
* Adds a VLAN to the database, along with an IP address range. Can add three types of VLANs: (1) zone-wide VLANs on * Adds a VLAN to the database, along with an IP address range. Can add three types of VLANs: (1) zone-wide VLANs on
* the * the
* virtual public network (2) pod-wide direct attached VLANs (3) account-specific direct attached VLANs * virtual public network (2) pod-wide direct attached VLANs (3) account-specific direct attached VLANs
*
* @param userId
* @param vlanType
* - either "DomR" (VLAN for a virtual public network) or "DirectAttached" (VLAN for IPs that will be
* directly
* attached to UserVMs)
* @param zoneId
* @param accountId
* @param podId
* @param add
* @param vlanId
* @param gateway
* @param startIP
* @param endIP
* @throws ResourceAllocationException TODO * @throws ResourceAllocationException TODO
* @throws
* @return The new Vlan object * @return The new Vlan object
*/ */
Vlan createVlanAndPublicIpRange(CreateVlanIpRangeCmd cmd) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, Vlan createVlanAndPublicIpRange(CreateVlanIpRangeCmd cmd) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
@ -302,6 +320,9 @@ public interface ConfigurationService {
/** /**
* Marks the account with the default zone-id. * Marks the account with the default zone-id.
* *
* @param accountName
* @param domainId
* @param defaultZoneId
* @return The new account object * @return The new account object
*/ */
Account markDefaultZone(String accountName, long domainId, long defaultZoneId); Account markDefaultZone(String accountName, long domainId, long defaultZoneId);
@ -312,7 +333,7 @@ public interface ConfigurationService {
boolean releasePublicIpRange(ReleasePublicIpRangeCmd cmd); boolean releasePublicIpRange(ReleasePublicIpRangeCmd cmd);
NetworkOffering createNetworkOffering(NetworkOfferingBaseCmd cmd); NetworkOffering createNetworkOffering(CreateNetworkOfferingCmd cmd);
NetworkOffering updateNetworkOffering(UpdateNetworkOfferingCmd cmd); NetworkOffering updateNetworkOffering(UpdateNetworkOfferingCmd cmd);
@ -323,12 +344,14 @@ public interface ConfigurationService {
/** /**
* Retrieve ID of domains for a network offering * Retrieve ID of domains for a network offering
* *
* @param networkOfferingId
*/ */
List<Long> getNetworkOfferingDomains(Long networkOfferingId); List<Long> getNetworkOfferingDomains(Long networkOfferingId);
/** /**
* Retrieve ID of domains for a network offering * Retrieve ID of domains for a network offering
* *
* @param networkOfferingId
*/ */
List<Long> getNetworkOfferingZones(Long networkOfferingId); List<Long> getNetworkOfferingZones(Long networkOfferingId);
@ -349,16 +372,4 @@ public interface ConfigurationService {
List<? extends PortableIp> listPortableIps(long id); List<? extends PortableIp> listPortableIps(long id);
Boolean isAccountAllowedToCreateOfferingsWithTags(IsAccountAllowedToCreateOfferingsWithTagsCmd cmd); Boolean isAccountAllowedToCreateOfferingsWithTags(IsAccountAllowedToCreateOfferingsWithTagsCmd cmd);
public static final Map<String, String> ProviderDetailKeyMap = Map.of(
Network.Provider.Nsx.getName(), ApiConstants.NSX_DETAIL_KEY,
Network.Provider.Netris.getName(), ApiConstants.NETRIS_DETAIL_KEY
);
public static boolean IsIpRangeForProvider(Network.Provider provider) {
if (Objects.isNull(provider)) {
return false;
}
return ProviderDetailKeyMap.containsKey(provider.getName());
}
} }

View File

@ -21,7 +21,7 @@ public interface Resource {
short RESOURCE_UNLIMITED = -1; short RESOURCE_UNLIMITED = -1;
String UNLIMITED = "Unlimited"; String UNLIMITED = "Unlimited";
enum ResourceType { // All storage type resources are allocated_storage and not the physical storage. enum ResourceType { // Primary and Secondary storage are allocated_storage and not the physical storage.
user_vm("user_vm", 0), user_vm("user_vm", 0),
public_ip("public_ip", 1), public_ip("public_ip", 1),
volume("volume", 2), volume("volume", 2),
@ -33,12 +33,7 @@ public interface Resource {
cpu("cpu", 8), cpu("cpu", 8),
memory("memory", 9), memory("memory", 9),
primary_storage("primary_storage", 10), primary_storage("primary_storage", 10),
secondary_storage("secondary_storage", 11), secondary_storage("secondary_storage", 11);
backup("backup", 12),
backup_storage("backup_storage", 13),
bucket("bucket", 14),
object_storage("object_storage", 15),
gpu("gpu", 16);
private String name; private String name;
private int ordinal; private int ordinal;
@ -67,10 +62,6 @@ public interface Resource {
} }
return null; return null;
} }
public static Boolean isStorageType(ResourceType type) {
return (type == primary_storage || type == secondary_storage || type == backup_storage || type == object_storage);
}
} }
public static class ResourceOwnerType { public static class ResourceOwnerType {

View File

@ -22,8 +22,7 @@ public class CPU {
public enum CPUArch { public enum CPUArch {
x86("i686", 32), x86("i686", 32),
amd64("x86_64", 64), amd64("x86_64", 64),
arm64("aarch64", 64), arm64("aarch64", 64);
s390x("s390x", 64);
private final String type; private final String type;
private final int bits; private final int bits;

View File

@ -43,6 +43,4 @@ public interface Pod extends InfrastructureEntity, Grouping, Identity, InternalI
AllocationState getAllocationState(); AllocationState getAllocationState();
boolean getExternalDhcp(); boolean getExternalDhcp();
String getStorageAccessGroups();
} }

View File

@ -62,11 +62,11 @@ public interface DeploymentClusterPlanner extends DeploymentPlanner {
"vm.allocation.algorithm", "vm.allocation.algorithm",
"Advanced", "Advanced",
"random", "random",
"Order in which hosts within a cluster will be considered for VM allocation. The value can be 'random', 'firstfit', 'userdispersing', or 'firstfitleastconsumed'.", "Order in which hosts within a cluster will be considered for VM/volume allocation. The value can be 'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', or 'firstfitleastconsumed'.",
true, true,
ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Scope.Global, null, null, null, null, null,
ConfigKey.Kind.Select, ConfigKey.Kind.Select,
"random,firstfit,userdispersing,firstfitleastconsumed"); "random,firstfit,userdispersing,userconcentratedpod_random,userconcentratedpod_firstfit,firstfitleastconsumed");
/** /**
* This is called to determine list of possible clusters where a virtual * This is called to determine list of possible clusters where a virtual

View File

@ -70,7 +70,7 @@ public interface DeploymentPlanner extends Adapter {
boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid); boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid);
public enum AllocationAlgorithm { public enum AllocationAlgorithm {
random, firstfit, userdispersing, firstfitleastconsumed; random, firstfit, userdispersing, userconcentratedpod_random, userconcentratedpod_firstfit;
} }
public enum PlannerResourceUsage { public enum PlannerResourceUsage {

View File

@ -27,21 +27,15 @@ import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.PodResponse;
import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.backup.BackupRepositoryService;
import org.apache.cloudstack.config.Configuration; import org.apache.cloudstack.config.Configuration;
import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet; import org.apache.cloudstack.datacenter.DataCenterIpv4GuestSubnet;
import org.apache.cloudstack.extension.Extension;
import org.apache.cloudstack.extension.ExtensionCustomAction;
import org.apache.cloudstack.gpu.GpuCard;
import org.apache.cloudstack.gpu.GpuDevice;
import org.apache.cloudstack.gpu.VgpuProfile;
import org.apache.cloudstack.ha.HAConfig; import org.apache.cloudstack.ha.HAConfig;
import org.apache.cloudstack.network.BgpPeer; import org.apache.cloudstack.network.BgpPeer;
import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap; import org.apache.cloudstack.network.Ipv4GuestSubnetNetworkMap;
import org.apache.cloudstack.quota.QuotaTariff; import org.apache.cloudstack.quota.QuotaTariff;
import org.apache.cloudstack.storage.sharedfs.SharedFS;
import org.apache.cloudstack.storage.object.Bucket; import org.apache.cloudstack.storage.object.Bucket;
import org.apache.cloudstack.storage.object.ObjectStore; import org.apache.cloudstack.storage.object.ObjectStore;
import org.apache.cloudstack.storage.sharedfs.SharedFS;
import org.apache.cloudstack.usage.Usage; import org.apache.cloudstack.usage.Usage;
import org.apache.cloudstack.vm.schedule.VMSchedule; import org.apache.cloudstack.vm.schedule.VMSchedule;
@ -295,12 +289,9 @@ public class EventTypes {
//registering userdata events //registering userdata events
public static final String EVENT_REGISTER_USER_DATA = "REGISTER.USER.DATA"; public static final String EVENT_REGISTER_USER_DATA = "REGISTER.USER.DATA";
public static final String EVENT_REGISTER_CNI_CONFIG = "REGISTER.CNI.CONFIG";
public static final String EVENT_DELETE_CNI_CONFIG = "DELETE.CNI.CONFIG";
//user API and secret keys //register for user API and secret keys
public static final String EVENT_REGISTER_FOR_SECRET_API_KEY = "REGISTER.USER.KEY"; public static final String EVENT_REGISTER_FOR_SECRET_API_KEY = "REGISTER.USER.KEY";
public static final String EVENT_DELETE_SECRET_API_KEY = "DELETE.USER.KEY";
public static final String API_KEY_ACCESS_UPDATE = "API.KEY.ACCESS.UPDATE"; public static final String API_KEY_ACCESS_UPDATE = "API.KEY.ACCESS.UPDATE";
// Template Events // Template Events
@ -375,34 +366,16 @@ public class EventTypes {
// Service Offerings // Service Offerings
public static final String EVENT_SERVICE_OFFERING_CREATE = "SERVICE.OFFERING.CREATE"; public static final String EVENT_SERVICE_OFFERING_CREATE = "SERVICE.OFFERING.CREATE";
public static final String EVENT_SERVICE_OFFERING_CLONE = "SERVICE.OFFERING.CLONE";
public static final String EVENT_SERVICE_OFFERING_EDIT = "SERVICE.OFFERING.EDIT"; public static final String EVENT_SERVICE_OFFERING_EDIT = "SERVICE.OFFERING.EDIT";
public static final String EVENT_SERVICE_OFFERING_DELETE = "SERVICE.OFFERING.DELETE"; public static final String EVENT_SERVICE_OFFERING_DELETE = "SERVICE.OFFERING.DELETE";
// Disk Offerings // Disk Offerings
public static final String EVENT_DISK_OFFERING_CREATE = "DISK.OFFERING.CREATE"; public static final String EVENT_DISK_OFFERING_CREATE = "DISK.OFFERING.CREATE";
public static final String EVENT_DISK_OFFERING_CLONE = "DISK.OFFERING.CLONE";
public static final String EVENT_DISK_OFFERING_EDIT = "DISK.OFFERING.EDIT"; public static final String EVENT_DISK_OFFERING_EDIT = "DISK.OFFERING.EDIT";
public static final String EVENT_DISK_OFFERING_DELETE = "DISK.OFFERING.DELETE"; public static final String EVENT_DISK_OFFERING_DELETE = "DISK.OFFERING.DELETE";
// GPU Cards
public static final String EVENT_GPU_CARD_CREATE = "GPU.CARD.CREATE";
public static final String EVENT_GPU_CARD_EDIT = "GPU.CARD.EDIT";
public static final String EVENT_GPU_CARD_DELETE = "GPU.CARD.DELETE";
// vGPU Profile
public static final String EVENT_VGPU_PROFILE_CREATE = "VGPU.PROFILE.CREATE";
public static final String EVENT_VGPU_PROFILE_EDIT = "VGPU.PROFILE.EDIT";
public static final String EVENT_VGPU_PROFILE_DELETE = "VGPU.PROFILE.DELETE";
// GPU Devices
public static final String EVENT_GPU_DEVICE_CREATE = "GPU.DEVICE.CREATE";
public static final String EVENT_GPU_DEVICE_EDIT = "GPU.DEVICE.EDIT";
public static final String EVENT_GPU_DEVICE_DELETE = "GPU.DEVICE.DELETE";
// Network offerings // Network offerings
public static final String EVENT_NETWORK_OFFERING_CREATE = "NETWORK.OFFERING.CREATE"; public static final String EVENT_NETWORK_OFFERING_CREATE = "NETWORK.OFFERING.CREATE";
public static final String EVENT_NETWORK_OFFERING_CLONE = "NETWORK.OFFERING.CLONE";
public static final String EVENT_NETWORK_OFFERING_ASSIGN = "NETWORK.OFFERING.ASSIGN"; public static final String EVENT_NETWORK_OFFERING_ASSIGN = "NETWORK.OFFERING.ASSIGN";
public static final String EVENT_NETWORK_OFFERING_EDIT = "NETWORK.OFFERING.EDIT"; public static final String EVENT_NETWORK_OFFERING_EDIT = "NETWORK.OFFERING.EDIT";
public static final String EVENT_NETWORK_OFFERING_REMOVE = "NETWORK.OFFERING.REMOVE"; public static final String EVENT_NETWORK_OFFERING_REMOVE = "NETWORK.OFFERING.REMOVE";
@ -492,7 +465,6 @@ public class EventTypes {
public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS"; public static final String EVENT_ENABLE_PRIMARY_STORAGE = "ENABLE.PS";
public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS"; public static final String EVENT_DISABLE_PRIMARY_STORAGE = "DISABLE.PS";
public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL"; public static final String EVENT_SYNC_STORAGE_POOL = "SYNC.STORAGE.POOL";
public static final String EVENT_CONFIGURE_STORAGE_ACCESS = "CONFIGURE.STORAGE.ACCESS";
public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE"; public static final String EVENT_CHANGE_STORAGE_POOL_SCOPE = "CHANGE.STORAGE.POOL.SCOPE";
// VPN // VPN
@ -507,7 +479,6 @@ public class EventTypes {
public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_CREATE = "VPN.S2S.CUSTOMER.GATEWAY.CREATE"; public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_CREATE = "VPN.S2S.CUSTOMER.GATEWAY.CREATE";
public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_DELETE = "VPN.S2S.CUSTOMER.GATEWAY.DELETE"; public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_DELETE = "VPN.S2S.CUSTOMER.GATEWAY.DELETE";
public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_UPDATE = "VPN.S2S.CUSTOMER.GATEWAY.UPDATE"; public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_UPDATE = "VPN.S2S.CUSTOMER.GATEWAY.UPDATE";
public static final String EVENT_S2S_VPN_GATEWAY_OBSOLETE_PARAMS = "VPN.S2S.GATEWAY.OBSOLETE.PARAMS";
public static final String EVENT_S2S_VPN_CONNECTION_CREATE = "VPN.S2S.CONNECTION.CREATE"; public static final String EVENT_S2S_VPN_CONNECTION_CREATE = "VPN.S2S.CONNECTION.CREATE";
public static final String EVENT_S2S_VPN_CONNECTION_DELETE = "VPN.S2S.CONNECTION.DELETE"; public static final String EVENT_S2S_VPN_CONNECTION_DELETE = "VPN.S2S.CONNECTION.DELETE";
public static final String EVENT_S2S_VPN_CONNECTION_RESET = "VPN.S2S.CONNECTION.RESET"; public static final String EVENT_S2S_VPN_CONNECTION_RESET = "VPN.S2S.CONNECTION.RESET";
@ -525,8 +496,6 @@ public class EventTypes {
public static final String EVENT_ZONE_VLAN_ASSIGN = "ZONE.VLAN.ASSIGN"; public static final String EVENT_ZONE_VLAN_ASSIGN = "ZONE.VLAN.ASSIGN";
public static final String EVENT_ZONE_VLAN_RELEASE = "ZONE.VLAN.RELEASE"; public static final String EVENT_ZONE_VLAN_RELEASE = "ZONE.VLAN.RELEASE";
public static final String EVENT_ZONE_VXLAN_ASSIGN = "ZONE.VXLAN.ASSIGN";
public static final String EVENT_ZONE_VXLAN_RELEASE = "ZONE.VXLAN.RELEASE";
// Projects // Projects
public static final String EVENT_PROJECT_CREATE = "PROJECT.CREATE"; public static final String EVENT_PROJECT_CREATE = "PROJECT.CREATE";
@ -587,7 +556,6 @@ public class EventTypes {
// Network ACL // Network ACL
public static final String EVENT_NETWORK_ACL_CREATE = "NETWORK.ACL.CREATE"; public static final String EVENT_NETWORK_ACL_CREATE = "NETWORK.ACL.CREATE";
public static final String EVENT_NETWORK_ACL_IMPORT = "NETWORK.ACL.IMPORT";
public static final String EVENT_NETWORK_ACL_DELETE = "NETWORK.ACL.DELETE"; public static final String EVENT_NETWORK_ACL_DELETE = "NETWORK.ACL.DELETE";
public static final String EVENT_NETWORK_ACL_REPLACE = "NETWORK.ACL.REPLACE"; public static final String EVENT_NETWORK_ACL_REPLACE = "NETWORK.ACL.REPLACE";
public static final String EVENT_NETWORK_ACL_UPDATE = "NETWORK.ACL.UPDATE"; public static final String EVENT_NETWORK_ACL_UPDATE = "NETWORK.ACL.UPDATE";
@ -602,7 +570,6 @@ public class EventTypes {
// VPC offerings // VPC offerings
public static final String EVENT_VPC_OFFERING_CREATE = "VPC.OFFERING.CREATE"; public static final String EVENT_VPC_OFFERING_CREATE = "VPC.OFFERING.CREATE";
public static final String EVENT_VPC_OFFERING_CLONE = "VPC.OFFERING.CLONE";
public static final String EVENT_VPC_OFFERING_UPDATE = "VPC.OFFERING.UPDATE"; public static final String EVENT_VPC_OFFERING_UPDATE = "VPC.OFFERING.UPDATE";
public static final String EVENT_VPC_OFFERING_DELETE = "VPC.OFFERING.DELETE"; public static final String EVENT_VPC_OFFERING_DELETE = "VPC.OFFERING.DELETE";
@ -635,19 +602,16 @@ public class EventTypes {
// Backup and Recovery events // Backup and Recovery events
public static final String EVENT_VM_BACKUP_IMPORT_OFFERING = "BACKUP.IMPORT.OFFERING"; public static final String EVENT_VM_BACKUP_IMPORT_OFFERING = "BACKUP.IMPORT.OFFERING";
public static final String EVENT_VM_BACKUP_OFFERING_CLONE = "BACKUP.OFFERING.CLONE";
public static final String EVENT_VM_BACKUP_OFFERING_ASSIGN = "BACKUP.OFFERING.ASSIGN"; public static final String EVENT_VM_BACKUP_OFFERING_ASSIGN = "BACKUP.OFFERING.ASSIGN";
public static final String EVENT_VM_BACKUP_OFFERING_REMOVE = "BACKUP.OFFERING.REMOVE"; public static final String EVENT_VM_BACKUP_OFFERING_REMOVE = "BACKUP.OFFERING.REMOVE";
public static final String EVENT_VM_BACKUP_CREATE = "BACKUP.CREATE"; public static final String EVENT_VM_BACKUP_CREATE = "BACKUP.CREATE";
public static final String EVENT_VM_BACKUP_RESTORE = "BACKUP.RESTORE"; public static final String EVENT_VM_BACKUP_RESTORE = "BACKUP.RESTORE";
public static final String EVENT_VM_BACKUP_DELETE = "BACKUP.DELETE"; public static final String EVENT_VM_BACKUP_DELETE = "BACKUP.DELETE";
public static final String EVENT_VM_BACKUP_OFFERING_REMOVED_AND_BACKUPS_DELETED = "BACKUP.OFFERING.BACKUPS.DEL";
public static final String EVENT_VM_BACKUP_RESTORE_VOLUME_TO_VM = "BACKUP.RESTORE.VOLUME.TO.VM"; public static final String EVENT_VM_BACKUP_RESTORE_VOLUME_TO_VM = "BACKUP.RESTORE.VOLUME.TO.VM";
public static final String EVENT_VM_BACKUP_SCHEDULE_CONFIGURE = "BACKUP.SCHEDULE.CONFIGURE"; public static final String EVENT_VM_BACKUP_SCHEDULE_CONFIGURE = "BACKUP.SCHEDULE.CONFIGURE";
public static final String EVENT_VM_BACKUP_SCHEDULE_DELETE = "BACKUP.SCHEDULE.DELETE"; public static final String EVENT_VM_BACKUP_SCHEDULE_DELETE = "BACKUP.SCHEDULE.DELETE";
public static final String EVENT_VM_BACKUP_USAGE_METRIC = "BACKUP.USAGE.METRIC"; public static final String EVENT_VM_BACKUP_USAGE_METRIC = "BACKUP.USAGE.METRIC";
public static final String EVENT_VM_BACKUP_EDIT = "BACKUP.OFFERING.EDIT"; public static final String EVENT_VM_BACKUP_EDIT = "BACKUP.OFFERING.EDIT";
public static final String EVENT_VM_CREATE_FROM_BACKUP = "VM.CREATE.FROM.BACKUP";
// external network device events // external network device events
public static final String EVENT_EXTERNAL_NVP_CONTROLLER_ADD = "PHYSICAL.NVPCONTROLLER.ADD"; public static final String EVENT_EXTERNAL_NVP_CONTROLLER_ADD = "PHYSICAL.NVPCONTROLLER.ADD";
@ -723,9 +687,6 @@ public class EventTypes {
public static final String EVENT_EXTERNAL_OPENDAYLIGHT_CONFIGURE_CONTROLLER = "PHYSICAL.ODLCONTROLLER.CONFIGURE"; public static final String EVENT_EXTERNAL_OPENDAYLIGHT_CONFIGURE_CONTROLLER = "PHYSICAL.ODLCONTROLLER.CONFIGURE";
//Guest OS related events //Guest OS related events
public static final String EVENT_GUEST_OS_CATEGORY_ADD = "GUEST.OS.CATEGORY.ADD";
public static final String EVENT_GUEST_OS_CATEGORY_DELETE = "GUEST.OS.CATEGORY.DELETE";
public static final String EVENT_GUEST_OS_CATEGORY_UPDATE = "GUEST.OS.CATEGORY.UPDATE";
public static final String EVENT_GUEST_OS_ADD = "GUEST.OS.ADD"; public static final String EVENT_GUEST_OS_ADD = "GUEST.OS.ADD";
public static final String EVENT_GUEST_OS_REMOVE = "GUEST.OS.REMOVE"; public static final String EVENT_GUEST_OS_REMOVE = "GUEST.OS.REMOVE";
public static final String EVENT_GUEST_OS_UPDATE = "GUEST.OS.UPDATE"; public static final String EVENT_GUEST_OS_UPDATE = "GUEST.OS.UPDATE";
@ -778,13 +739,6 @@ public class EventTypes {
//Purge resources //Purge resources
public static final String EVENT_PURGE_EXPUNGED_RESOURCES = "PURGE.EXPUNGED.RESOURCES"; public static final String EVENT_PURGE_EXPUNGED_RESOURCES = "PURGE.EXPUNGED.RESOURCES";
// Management Server
public static final String EVENT_MS_MAINTENANCE_PREPARE = "MS.MAINTENANCE.PREPARE";
public static final String EVENT_MS_MAINTENANCE_CANCEL = "MS.MAINTENANCE.CANCEL";
public static final String EVENT_MS_SHUTDOWN_PREPARE = "MS.SHUTDOWN.PREPARE";
public static final String EVENT_MS_SHUTDOWN_CANCEL = "MS.SHUTDOWN.CANCEL";
public static final String EVENT_MS_SHUTDOWN = "MS.SHUTDOWN";
// OBJECT STORE // OBJECT STORE
public static final String EVENT_OBJECT_STORE_CREATE = "OBJECT.STORE.CREATE"; public static final String EVENT_OBJECT_STORE_CREATE = "OBJECT.STORE.CREATE";
public static final String EVENT_OBJECT_STORE_DELETE = "OBJECT.STORE.DELETE"; public static final String EVENT_OBJECT_STORE_DELETE = "OBJECT.STORE.DELETE";
@ -831,40 +785,6 @@ public class EventTypes {
public static final String EVENT_SHAREDFS_EXPUNGE = "SHAREDFS.EXPUNGE"; public static final String EVENT_SHAREDFS_EXPUNGE = "SHAREDFS.EXPUNGE";
public static final String EVENT_SHAREDFS_RECOVER = "SHAREDFS.RECOVER"; public static final String EVENT_SHAREDFS_RECOVER = "SHAREDFS.RECOVER";
// Resource Limit
public static final String EVENT_RESOURCE_LIMIT_UPDATE = "RESOURCE.LIMIT.UPDATE";
// Management Server
public static final String EVENT_MANAGEMENT_SERVER_REMOVE = "MANAGEMENT.SERVER.REMOVE";
// VM Lease
public static final String VM_LEASE_EXPIRED = "VM.LEASE.EXPIRED";
public static final String VM_LEASE_DISABLED = "VM.LEASE.DISABLED";
public static final String VM_LEASE_CANCELLED = "VM.LEASE.CANCELLED";
public static final String VM_LEASE_EXPIRING = "VM.LEASE.EXPIRING";
// GUI Theme
public static final String EVENT_GUI_THEME_CREATE = "GUI.THEME.CREATE";
public static final String EVENT_GUI_THEME_REMOVE = "GUI.THEME.REMOVE";
public static final String EVENT_GUI_THEME_UPDATE = "GUI.THEME.UPDATE";
// Extension
public static final String EVENT_EXTENSION_CREATE = "EXTENSION.CREATE";
public static final String EVENT_EXTENSION_UPDATE = "EXTENSION.UPDATE";
public static final String EVENT_EXTENSION_DELETE = "EXTENSION.DELETE";
public static final String EVENT_EXTENSION_RESOURCE_REGISTER = "EXTENSION.RESOURCE.REGISTER";
public static final String EVENT_EXTENSION_RESOURCE_UNREGISTER = "EXTENSION.RESOURCE.UNREGISTER";
public static final String EVENT_EXTENSION_CUSTOM_ACTION_ADD = "EXTENSION.CUSTOM.ACTION.ADD";
public static final String EVENT_EXTENSION_CUSTOM_ACTION_UPDATE = "EXTENSION.CUSTOM.ACTION.UPDATE";
public static final String EVENT_EXTENSION_CUSTOM_ACTION_DELETE = "EXTENSION.CUSTOM.ACTION.DELETE";
// Custom Action
public static final String EVENT_CUSTOM_ACTION = "CUSTOM.ACTION";
// Backup Repository
public static final String EVENT_BACKUP_REPOSITORY_ADD = "BACKUP.REPOSITORY.ADD";
public static final String EVENT_BACKUP_REPOSITORY_UPDATE = "BACKUP.REPOSITORY.UPDATE";
static { static {
// TODO: need a way to force author adding event types to declare the entity details as well, with out braking // TODO: need a way to force author adding event types to declare the entity details as well, with out braking
@ -1051,34 +971,16 @@ public class EventTypes {
// Service Offerings // Service Offerings
entityEventDetails.put(EVENT_SERVICE_OFFERING_CREATE, ServiceOffering.class); entityEventDetails.put(EVENT_SERVICE_OFFERING_CREATE, ServiceOffering.class);
entityEventDetails.put(EVENT_SERVICE_OFFERING_CLONE, ServiceOffering.class);
entityEventDetails.put(EVENT_SERVICE_OFFERING_EDIT, ServiceOffering.class); entityEventDetails.put(EVENT_SERVICE_OFFERING_EDIT, ServiceOffering.class);
entityEventDetails.put(EVENT_SERVICE_OFFERING_DELETE, ServiceOffering.class); entityEventDetails.put(EVENT_SERVICE_OFFERING_DELETE, ServiceOffering.class);
// Disk Offerings // Disk Offerings
entityEventDetails.put(EVENT_DISK_OFFERING_CREATE, DiskOffering.class); entityEventDetails.put(EVENT_DISK_OFFERING_CREATE, DiskOffering.class);
entityEventDetails.put(EVENT_DISK_OFFERING_CLONE, DiskOffering.class);
entityEventDetails.put(EVENT_DISK_OFFERING_EDIT, DiskOffering.class); entityEventDetails.put(EVENT_DISK_OFFERING_EDIT, DiskOffering.class);
entityEventDetails.put(EVENT_DISK_OFFERING_DELETE, DiskOffering.class); entityEventDetails.put(EVENT_DISK_OFFERING_DELETE, DiskOffering.class);
// GPU Cards
entityEventDetails.put(EVENT_GPU_CARD_CREATE, GpuCard.class);
entityEventDetails.put(EVENT_GPU_CARD_EDIT, GpuCard.class);
entityEventDetails.put(EVENT_GPU_CARD_DELETE, GpuCard.class);
// vGPU Profiles
entityEventDetails.put(EVENT_VGPU_PROFILE_CREATE, VgpuProfile.class);
entityEventDetails.put(EVENT_VGPU_PROFILE_EDIT, VgpuProfile.class);
entityEventDetails.put(EVENT_VGPU_PROFILE_DELETE, VgpuProfile.class);
// GPU Devices
entityEventDetails.put(EVENT_GPU_DEVICE_CREATE, GpuDevice.class);
entityEventDetails.put(EVENT_GPU_DEVICE_EDIT, GpuDevice.class);
entityEventDetails.put(EVENT_GPU_DEVICE_DELETE, GpuDevice.class);
// Network offerings // Network offerings
entityEventDetails.put(EVENT_NETWORK_OFFERING_CREATE, NetworkOffering.class); entityEventDetails.put(EVENT_NETWORK_OFFERING_CREATE, NetworkOffering.class);
entityEventDetails.put(EVENT_NETWORK_OFFERING_CLONE, NetworkOffering.class);
entityEventDetails.put(EVENT_NETWORK_OFFERING_ASSIGN, NetworkOffering.class); entityEventDetails.put(EVENT_NETWORK_OFFERING_ASSIGN, NetworkOffering.class);
entityEventDetails.put(EVENT_NETWORK_OFFERING_EDIT, NetworkOffering.class); entityEventDetails.put(EVENT_NETWORK_OFFERING_EDIT, NetworkOffering.class);
entityEventDetails.put(EVENT_NETWORK_OFFERING_REMOVE, NetworkOffering.class); entityEventDetails.put(EVENT_NETWORK_OFFERING_REMOVE, NetworkOffering.class);
@ -1162,7 +1064,6 @@ public class EventTypes {
entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_CREATE, Site2SiteCustomerGateway.class); entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_CREATE, Site2SiteCustomerGateway.class);
entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_DELETE, Site2SiteCustomerGateway.class); entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_DELETE, Site2SiteCustomerGateway.class);
entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_UPDATE, Site2SiteCustomerGateway.class); entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_UPDATE, Site2SiteCustomerGateway.class);
entityEventDetails.put(EVENT_S2S_VPN_GATEWAY_OBSOLETE_PARAMS, Site2SiteCustomerGateway.class);
entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_CREATE, Site2SiteVpnConnection.class); entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_CREATE, Site2SiteVpnConnection.class);
entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_DELETE, Site2SiteVpnConnection.class); entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_DELETE, Site2SiteVpnConnection.class);
entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_RESET, Site2SiteVpnConnection.class); entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_RESET, Site2SiteVpnConnection.class);
@ -1329,12 +1230,6 @@ public class EventTypes {
entityEventDetails.put(EVENT_UPDATE_IMAGE_STORE_ACCESS_STATE, ImageStore.class); entityEventDetails.put(EVENT_UPDATE_IMAGE_STORE_ACCESS_STATE, ImageStore.class);
entityEventDetails.put(EVENT_LIVE_PATCH_SYSTEMVM, "SystemVMs"); entityEventDetails.put(EVENT_LIVE_PATCH_SYSTEMVM, "SystemVMs");
entityEventDetails.put(EVENT_MS_MAINTENANCE_PREPARE, "ManagementServer");
entityEventDetails.put(EVENT_MS_MAINTENANCE_CANCEL, "ManagementServer");
entityEventDetails.put(EVENT_MS_SHUTDOWN_PREPARE, "ManagementServer");
entityEventDetails.put(EVENT_MS_SHUTDOWN_CANCEL, "ManagementServer");
entityEventDetails.put(EVENT_MS_SHUTDOWN, "ManagementServer");
//Object Store //Object Store
entityEventDetails.put(EVENT_OBJECT_STORE_CREATE, ObjectStore.class); entityEventDetails.put(EVENT_OBJECT_STORE_CREATE, ObjectStore.class);
entityEventDetails.put(EVENT_OBJECT_STORE_UPDATE, ObjectStore.class); entityEventDetails.put(EVENT_OBJECT_STORE_UPDATE, ObjectStore.class);
@ -1378,34 +1273,6 @@ public class EventTypes {
entityEventDetails.put(EVENT_SHAREDFS_DESTROY, SharedFS.class); entityEventDetails.put(EVENT_SHAREDFS_DESTROY, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_EXPUNGE, SharedFS.class); entityEventDetails.put(EVENT_SHAREDFS_EXPUNGE, SharedFS.class);
entityEventDetails.put(EVENT_SHAREDFS_RECOVER, SharedFS.class); entityEventDetails.put(EVENT_SHAREDFS_RECOVER, SharedFS.class);
// Management Server
entityEventDetails.put(EVENT_MANAGEMENT_SERVER_REMOVE, "ManagementServer");
// VM Lease
entityEventDetails.put(VM_LEASE_EXPIRED, VirtualMachine.class);
entityEventDetails.put(VM_LEASE_EXPIRING, VirtualMachine.class);
entityEventDetails.put(VM_LEASE_DISABLED, VirtualMachine.class);
entityEventDetails.put(VM_LEASE_CANCELLED, VirtualMachine.class);
// GUI theme
entityEventDetails.put(EVENT_GUI_THEME_CREATE, "GuiTheme");
entityEventDetails.put(EVENT_GUI_THEME_REMOVE, "GuiTheme");
entityEventDetails.put(EVENT_GUI_THEME_UPDATE, "GuiTheme");
// Extension
entityEventDetails.put(EVENT_EXTENSION_CREATE, Extension.class);
entityEventDetails.put(EVENT_EXTENSION_UPDATE, Extension.class);
entityEventDetails.put(EVENT_EXTENSION_DELETE, Extension.class);
entityEventDetails.put(EVENT_EXTENSION_RESOURCE_REGISTER, Extension.class);
entityEventDetails.put(EVENT_EXTENSION_RESOURCE_UNREGISTER, Extension.class);
entityEventDetails.put(EVENT_EXTENSION_CUSTOM_ACTION_ADD, ExtensionCustomAction.class);
entityEventDetails.put(EVENT_EXTENSION_CUSTOM_ACTION_UPDATE, ExtensionCustomAction.class);
entityEventDetails.put(EVENT_EXTENSION_CUSTOM_ACTION_DELETE, ExtensionCustomAction.class);
// Backup Repository
entityEventDetails.put(EVENT_BACKUP_REPOSITORY_ADD, BackupRepositoryService.class);
entityEventDetails.put(EVENT_BACKUP_REPOSITORY_UPDATE, BackupRepositoryService.class);
} }
public static boolean isNetworkEvent(String eventType) { public static boolean isNetworkEvent(String eventType) {

View File

@ -40,7 +40,7 @@ public class OperationTimedoutException extends CloudException {
boolean _isActive; boolean _isActive;
public OperationTimedoutException(Command[] cmds, long agentId, long seqId, int time, boolean isActive) { public OperationTimedoutException(Command[] cmds, long agentId, long seqId, int time, boolean isActive) {
super("Commands " + seqId + " to Host " + agentId + " timed out after " + time + " secs"); super("Commands " + seqId + " to Host " + agentId + " timed out after " + time);
_agentId = agentId; _agentId = agentId;
_seqId = seqId; _seqId = seqId;
_time = time; _time = time;

View File

@ -26,19 +26,17 @@ public interface Investigator extends Adapter {
* Returns if the vm is still alive. * Returns if the vm is still alive.
* *
* @param vm to work on. * @param vm to work on.
* @return true if vm is alive, otherwise false
*/ */
boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM; public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM;
/** public Status isAgentAlive(Host agent);
* Returns the agent status of the host.
*
* @param host
* @return status of the host agent
*/
Status getHostAgentStatus(Host host);
class UnknownVM extends Exception { class UnknownVM extends Exception {
/**
*
*/
private static final long serialVersionUID = 1L; private static final long serialVersionUID = 1L;
}; };
} }

View File

@ -57,9 +57,6 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
String HOST_UEFI_ENABLE = "host.uefi.enable"; String HOST_UEFI_ENABLE = "host.uefi.enable";
String HOST_VOLUME_ENCRYPTION = "host.volume.encryption"; String HOST_VOLUME_ENCRYPTION = "host.volume.encryption";
String HOST_INSTANCE_CONVERSION = "host.instance.conversion"; String HOST_INSTANCE_CONVERSION = "host.instance.conversion";
String HOST_VDDK_SUPPORT = "host.vddk.support";
String HOST_VDDK_LIB_DIR = "vddk.lib.dir";
String HOST_VDDK_VERSION = "host.vddk.version";
String HOST_OVFTOOL_VERSION = "host.ovftool.version"; String HOST_OVFTOOL_VERSION = "host.ovftool.version";
String HOST_VIRTV2V_VERSION = "host.virtv2v.version"; String HOST_VIRTV2V_VERSION = "host.virtv2v.version";
String HOST_SSH_PORT = "host.ssh.port"; String HOST_SSH_PORT = "host.ssh.port";
@ -186,8 +183,6 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
*/ */
Long getManagementServerId(); Long getManagementServerId();
Long getLastManagementServerId();
/* /*
*@return removal date *@return removal date
*/ */
@ -222,6 +217,4 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
ResourceState getResourceState(); ResourceState getResourceState();
CPU.CPUArch getArch(); CPU.CPUArch getArch();
String getStorageAccessGroups();
} }

View File

@ -36,4 +36,5 @@ public interface HostStats {
public HostStats getHostStats(); public HostStats getHostStats();
public double getLoadAverage(); public double getLoadAverage();
// public double getXapiMemoryUsageKBs();
} }

View File

@ -127,7 +127,6 @@ public enum Status {
s_fsm.addTransition(Status.Connecting, Event.HostDown, Status.Down); s_fsm.addTransition(Status.Connecting, Event.HostDown, Status.Down);
s_fsm.addTransition(Status.Connecting, Event.Ping, Status.Connecting); s_fsm.addTransition(Status.Connecting, Event.Ping, Status.Connecting);
s_fsm.addTransition(Status.Connecting, Event.ManagementServerDown, Status.Disconnected); s_fsm.addTransition(Status.Connecting, Event.ManagementServerDown, Status.Disconnected);
s_fsm.addTransition(Status.Connecting, Event.StartAgentRebalance, Status.Rebalancing);
s_fsm.addTransition(Status.Connecting, Event.AgentDisconnected, Status.Alert); s_fsm.addTransition(Status.Connecting, Event.AgentDisconnected, Status.Alert);
s_fsm.addTransition(Status.Up, Event.PingTimeout, Status.Alert); s_fsm.addTransition(Status.Up, Event.PingTimeout, Status.Alert);
s_fsm.addTransition(Status.Up, Event.AgentDisconnected, Status.Alert); s_fsm.addTransition(Status.Up, Event.AgentDisconnected, Status.Alert);

View File

@ -56,7 +56,6 @@ public class Hypervisor {
public static final HypervisorType Ovm3 = new HypervisorType("Ovm3", ImageFormat.RAW); public static final HypervisorType Ovm3 = new HypervisorType("Ovm3", ImageFormat.RAW);
public static final HypervisorType LXC = new HypervisorType("LXC"); public static final HypervisorType LXC = new HypervisorType("LXC");
public static final HypervisorType Custom = new HypervisorType("Custom", null, EnumSet.of(RootDiskSizeOverride)); public static final HypervisorType Custom = new HypervisorType("Custom", null, EnumSet.of(RootDiskSizeOverride));
public static final HypervisorType External = new HypervisorType("External", null, EnumSet.of(RootDiskSizeOverride));
public static final HypervisorType Any = new HypervisorType("Any"); /*If you don't care about the hypervisor type*/ public static final HypervisorType Any = new HypervisorType("Any"); /*If you don't care about the hypervisor type*/
private final String name; private final String name;
private final ImageFormat imageFormat; private final ImageFormat imageFormat;

View File

@ -18,26 +18,14 @@ package com.cloud.kubernetes.cluster;
import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity;
import java.util.List;
import java.util.Map;
import com.cloud.user.Account; import com.cloud.user.Account;
import com.cloud.uservm.UserVm; import com.cloud.uservm.UserVm;
import com.cloud.utils.component.Adapter; import com.cloud.utils.component.Adapter;
public interface KubernetesServiceHelper extends Adapter { public interface KubernetesServiceHelper extends Adapter {
enum KubernetesClusterNodeType {
CONTROL, WORKER, ETCD, DEFAULT
}
ControlledEntity findByUuid(String uuid); ControlledEntity findByUuid(String uuid);
ControlledEntity findByVmId(long vmId); ControlledEntity findByVmId(long vmId);
void checkVmCanBeDestroyed(UserVm userVm); void checkVmCanBeDestroyed(UserVm userVm);
void checkVmAffinityGroupsCanBeUpdated(UserVm userVm);
boolean isValidNodeType(String nodeType);
Map<String, Long> getServiceOfferingNodeTypeMap(Map<String, Map<String, String>> serviceOfferingNodeTypeMap);
Map<String, Long> getTemplateNodeTypeMap(Map<String, Map<String, String>> templateNodeTypeMap);
Map<String, List<Long>> getAffinityGroupNodeTypeMap(Map<String, Map<String, String>> affinityGroupNodeTypeMap);
void cleanupForAccount(Account account); void cleanupForAccount(Account account);
} }

View File

@ -99,5 +99,4 @@ public interface IpAddress extends ControlledEntity, Identity, InternalIdentity,
boolean isForSystemVms(); boolean isForSystemVms();
boolean isForRouter();
} }

View File

@ -206,7 +206,6 @@ public interface Network extends ControlledEntity, StateObject<Network.State>, I
public static final Provider Tungsten = new Provider("Tungsten", false); public static final Provider Tungsten = new Provider("Tungsten", false);
public static final Provider Nsx = new Provider("Nsx", false); public static final Provider Nsx = new Provider("Nsx", false);
public static final Provider Netris = new Provider("Netris", false);
private final String name; private final String name;
private final boolean isExternal; private final boolean isExternal;
@ -510,6 +509,4 @@ public interface Network extends ControlledEntity, StateObject<Network.State>, I
Integer getPrivateMtu(); Integer getPrivateMtu();
Integer getNetworkCidrSize(); Integer getNetworkCidrSize();
boolean getKeepMacAddressOnPublicNic();
} }

View File

@ -309,8 +309,6 @@ public interface NetworkModel {
NicProfile getNicProfile(VirtualMachine vm, long networkId, String broadcastUri); NicProfile getNicProfile(VirtualMachine vm, long networkId, String broadcastUri);
NicProfile getNicProfile(VirtualMachine vm, Nic nic, DataCenter dataCenter);
Set<Long> getAvailableIps(Network network, String requestedIp); Set<Long> getAvailableIps(Network network, String requestedIp);
String getDomainNetworkDomain(long domainId, long zoneId); String getDomainNetworkDomain(long domainId, long zoneId);

View File

@ -385,11 +385,6 @@ public class NetworkProfile implements Network {
return networkCidrSize; return networkCidrSize;
} }
@Override
public boolean getKeepMacAddressOnPublicNic() {
return true;
}
@Override @Override
public String toString() { public String toString() {
return String.format("NetworkProfile %s", return String.format("NetworkProfile %s",

View File

@ -19,6 +19,7 @@ package com.cloud.network;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import com.cloud.dc.DataCenter;
import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.api.command.admin.address.ReleasePodIpCmdByAdmin; import org.apache.cloudstack.api.command.admin.address.ReleasePodIpCmdByAdmin;
import org.apache.cloudstack.api.command.admin.network.DedicateGuestVlanRangeCmd; import org.apache.cloudstack.api.command.admin.network.DedicateGuestVlanRangeCmd;
@ -38,16 +39,13 @@ import org.apache.cloudstack.api.command.user.network.UpdateNetworkCmd;
import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import org.apache.cloudstack.api.command.user.vm.ListNicsCmd;
import org.apache.cloudstack.api.response.AcquirePodIpCmdResponse; import org.apache.cloudstack.api.response.AcquirePodIpCmdResponse;
import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.network.element.InternalLoadBalancerElementService;
import com.cloud.agent.api.to.NicTO;
import com.cloud.dc.DataCenter;
import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientAddressCapacityException;
import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.network.Network.IpAddresses; import com.cloud.network.Network.IpAddresses;
import com.cloud.network.Network.Service; import com.cloud.network.Network.Service;
import com.cloud.network.Networks.TrafficType; import com.cloud.network.Networks.TrafficType;
@ -59,6 +57,7 @@ import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.Nic; import com.cloud.vm.Nic;
import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.NicSecondaryIp;
import org.apache.cloudstack.network.element.InternalLoadBalancerElementService;
/** /**
* The NetworkService interface is the "public" api to entities that make requests to the orchestration engine * The NetworkService interface is the "public" api to entities that make requests to the orchestration engine
@ -273,12 +272,4 @@ public interface NetworkService {
InternalLoadBalancerElementService getInternalLoadBalancerElementByNetworkServiceProviderId(long networkProviderId); InternalLoadBalancerElementService getInternalLoadBalancerElementByNetworkServiceProviderId(long networkProviderId);
InternalLoadBalancerElementService getInternalLoadBalancerElementById(long providerId); InternalLoadBalancerElementService getInternalLoadBalancerElementById(long providerId);
List<InternalLoadBalancerElementService> getInternalLoadBalancerElements(); List<InternalLoadBalancerElementService> getInternalLoadBalancerElements();
boolean handleCksIsoOnNetworkVirtualRouter(Long virtualRouterId, boolean mount) throws ResourceUnavailableException;
IpAddresses getIpAddressesFromIps(String ipAddress, String ip6Address, String macAddress);
String getNicVlanValueForExternalVm(NicTO nic);
Long getPreferredNetworkIdForPublicIpRuleAssignment(IpAddress ip, Long networkId);
} }

View File

@ -129,8 +129,7 @@ public class Networks {
UnDecided(null, null), UnDecided(null, null),
OpenDaylight("opendaylight", String.class), OpenDaylight("opendaylight", String.class),
TUNGSTEN("tf", String.class), TUNGSTEN("tf", String.class),
NSX("nsx", String.class), NSX("nsx", String.class);
Netris("netris", String.class);
private final String scheme; private final String scheme;
private final Class<?> type; private final Class<?> type;

View File

@ -41,6 +41,4 @@ public interface PhysicalNetworkTrafficType extends InternalIdentity, Identity {
String getHypervNetworkLabel(); String getHypervNetworkLabel();
String getOvm3NetworkLabel(); String getOvm3NetworkLabel();
String getVlan();
} }

View File

@ -26,7 +26,7 @@ public interface RouterHealthCheckResult {
String getCheckType(); String getCheckType();
VirtualNetworkApplianceService.RouterHealthStatus getCheckResult(); boolean getCheckResult();
Date getLastUpdateTime(); Date getLastUpdateTime();

View File

@ -1,358 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network;
import java.util.List;
/**
 * Provider-agnostic description of a single network rule (firewall, port
 * forwarding, load balancing, ACL, ...) handed to SDN provider plugins.
 * Instances are plain mutable carriers with no validation; use the nested
 * {@link Builder} to create fully populated rules in one expression.
 *
 * NOTE(review): the per-field semantics below are inferred from the field
 * names — confirm against the provider implementations that consume this
 * class before relying on them.
 */
public class SDNProviderNetworkRule {
    // Ownership / placement of the rule.
    protected long domainId;
    protected long accountId;
    protected long zoneId;
    // Network or VPC the rule belongs to; isVpcResource distinguishes which.
    protected Long networkResourceId;
    protected String networkResourceName;
    protected boolean isVpcResource;
    // Endpoints: the VM behind the rule and the address/port pairs mapped.
    protected long vmId;
    protected long ruleId;
    protected String publicIp;
    protected String vmIp;
    protected String publicPort;   // may be a single port or a "start-end" range
    protected String privatePort;  // may be a single port or a "start-end" range
    // Traffic match criteria.
    protected String protocol;
    protected String algorithm;    // load-balancer algorithm, when applicable
    protected List<String> sourceCidrList;
    protected List<String> destinationCidrList;
    protected Integer icmpCode;    // only meaningful for ICMP rules
    protected Integer icmpType;    // only meaningful for ICMP rules
    protected String trafficType;
    protected Network.Service service; // network service this rule implements

    // Plain accessors; no validation or defensive copying is performed.

    public long getDomainId() {
        return domainId;
    }
    public void setDomainId(long domainId) {
        this.domainId = domainId;
    }
    public long getAccountId() {
        return accountId;
    }
    public void setAccountId(long accountId) {
        this.accountId = accountId;
    }
    public long getZoneId() {
        return zoneId;
    }
    public void setZoneId(long zoneId) {
        this.zoneId = zoneId;
    }
    public Long getNetworkResourceId() {
        return networkResourceId;
    }
    public void setNetworkResourceId(Long networkResourceId) {
        this.networkResourceId = networkResourceId;
    }
    public String getNetworkResourceName() {
        return networkResourceName;
    }
    public void setNetworkResourceName(String networkResourceName) {
        this.networkResourceName = networkResourceName;
    }
    public boolean isVpcResource() {
        return isVpcResource;
    }
    public void setVpcResource(boolean vpcResource) {
        isVpcResource = vpcResource;
    }
    public long getVmId() {
        return vmId;
    }
    public void setVmId(long vmId) {
        this.vmId = vmId;
    }
    public long getRuleId() {
        return ruleId;
    }
    public void setRuleId(long ruleId) {
        this.ruleId = ruleId;
    }
    public String getPublicIp() {
        return publicIp;
    }
    public void setPublicIp(String publicIp) {
        this.publicIp = publicIp;
    }
    public String getVmIp() {
        return vmIp;
    }
    public void setVmIp(String vmIp) {
        this.vmIp = vmIp;
    }
    public String getPublicPort() {
        return publicPort;
    }
    public void setPublicPort(String publicPort) {
        this.publicPort = publicPort;
    }
    public String getPrivatePort() {
        return privatePort;
    }
    public void setPrivatePort(String privatePort) {
        this.privatePort = privatePort;
    }
    public String getProtocol() {
        return protocol;
    }
    public void setProtocol(String protocol) {
        this.protocol = protocol;
    }
    public void setAlgorithm(String algorithm) {
        this.algorithm = algorithm;
    }
    public String getAlgorithm() {
        return algorithm;
    }
    public Network.Service getService() {
        return service;
    }
    public void setService(Network.Service service) {
        this.service = service;
    }
    public Integer getIcmpCode() {
        return icmpCode;
    }
    public void setIcmpCode(Integer icmpCode) {
        this.icmpCode = icmpCode;
    }
    public Integer getIcmpType() {
        return icmpType;
    }
    public void setIcmpType(Integer icmpType) {
        this.icmpType = icmpType;
    }
    public List<String> getSourceCidrList() {
        return sourceCidrList;
    }
    public void setSourceCidrList(List<String> sourceCidrList) {
        this.sourceCidrList = sourceCidrList;
    }
    public List<String> getDestinationCidrList() {
        return destinationCidrList;
    }
    public void setDestinationCidrList(List<String> destinationCidrList) {
        this.destinationCidrList = destinationCidrList;
    }
    public String getTrafficType() {
        return trafficType;
    }
    public void setTrafficType(String trafficType) {
        this.trafficType = trafficType;
    }

    /**
     * Fluent builder for {@link SDNProviderNetworkRule}. Fields left unset
     * keep their Java defaults (0 / null / false); {@link #build()} copies
     * every field onto a fresh rule instance.
     */
    public static class Builder {
        // NOTE(review): fields are public, mirroring the original struct-like
        // usage; callers normally go through the fluent setters below.
        public long domainId;
        public long accountId;
        public long zoneId;
        public Long networkResourceId;
        public String networkResourceName;
        public boolean isVpcResource;
        public long vmId;
        public long ruleId;
        public String publicIp;
        public String vmIp;
        public String publicPort;
        public String privatePort;
        public String protocol;
        public String algorithm;
        public List<String> sourceCidrList;
        public List<String> destinationCidrList;
        public String trafficType;
        public Integer icmpType;
        public Integer icmpCode;
        public Network.Service service;
        public Builder() {
            // Default constructor
        }
        public Builder setDomainId(long domainId) {
            this.domainId = domainId;
            return this;
        }
        public Builder setAccountId(long accountId) {
            this.accountId = accountId;
            return this;
        }
        public Builder setZoneId(long zoneId) {
            this.zoneId = zoneId;
            return this;
        }
        public Builder setNetworkResourceId(Long networkResourceId) {
            this.networkResourceId = networkResourceId;
            return this;
        }
        public Builder setNetworkResourceName(String networkResourceName) {
            this.networkResourceName = networkResourceName;
            return this;
        }
        public Builder setVpcResource(boolean isVpcResource) {
            this.isVpcResource = isVpcResource;
            return this;
        }
        public Builder setVmId(long vmId) {
            this.vmId = vmId;
            return this;
        }
        public Builder setRuleId(long ruleId) {
            this.ruleId = ruleId;
            return this;
        }
        public Builder setPublicIp(String publicIp) {
            this.publicIp = publicIp;
            return this;
        }
        public Builder setVmIp(String vmIp) {
            this.vmIp = vmIp;
            return this;
        }
        public Builder setPublicPort(String publicPort) {
            this.publicPort = publicPort;
            return this;
        }
        public Builder setPrivatePort(String privatePort) {
            this.privatePort = privatePort;
            return this;
        }
        public Builder setProtocol(String protocol) {
            this.protocol = protocol;
            return this;
        }
        public Builder setAlgorithm(String algorithm) {
            this.algorithm = algorithm;
            return this;
        }
        public Builder setTrafficType(String trafficType) {
            this.trafficType = trafficType;
            return this;
        }
        public Builder setIcmpType(Integer icmpType) {
            this.icmpType = icmpType;
            return this;
        }
        public Builder setIcmpCode(Integer icmpCode) {
            this.icmpCode = icmpCode;
            return this;
        }
        public Builder setSourceCidrList(List<String> sourceCidrList) {
            this.sourceCidrList = sourceCidrList;
            return this;
        }
        public Builder setDestinationCidrList(List<String> destinationCidrList) {
            this.destinationCidrList = destinationCidrList;
            return this;
        }
        public Builder setService(Network.Service service) {
            this.service = service;
            return this;
        }
        /**
         * Copies every builder field onto a new {@link SDNProviderNetworkRule}
         * and returns it. The builder itself remains reusable.
         */
        public SDNProviderNetworkRule build() {
            SDNProviderNetworkRule rule = new SDNProviderNetworkRule();
            rule.setDomainId(this.domainId);
            rule.setAccountId(this.accountId);
            rule.setZoneId(this.zoneId);
            rule.setNetworkResourceId(this.networkResourceId);
            rule.setNetworkResourceName(this.networkResourceName);
            rule.setVpcResource(this.isVpcResource);
            rule.setVmId(this.vmId);
            rule.setVmIp(this.vmIp);
            rule.setPublicIp(this.publicIp);
            rule.setPublicPort(this.publicPort);
            rule.setPrivatePort(this.privatePort);
            rule.setProtocol(this.protocol);
            rule.setRuleId(this.ruleId);
            rule.setAlgorithm(this.algorithm);
            rule.setIcmpType(this.icmpType);
            rule.setIcmpCode(this.icmpCode);
            rule.setSourceCidrList(this.sourceCidrList);
            rule.setDestinationCidrList(this.destinationCidrList);
            rule.setTrafficType(this.trafficType);
            rule.setService(service);
            return rule;
        }
    }
}

View File

@ -24,7 +24,7 @@ import org.apache.cloudstack.api.InternalIdentity;
public interface Site2SiteVpnConnection extends ControlledEntity, InternalIdentity, Displayable { public interface Site2SiteVpnConnection extends ControlledEntity, InternalIdentity, Displayable {
enum State { enum State {
Pending, Connecting, Connected, Disconnected, Error, Removed Pending, Connecting, Connected, Disconnected, Error,
} }
@Override @Override

View File

@ -87,8 +87,4 @@ public interface VirtualNetworkApplianceService {
Pair<Boolean, String> performRouterHealthChecks(long routerId); Pair<Boolean, String> performRouterHealthChecks(long routerId);
<T extends VirtualRouter> void collectNetworkStatistics(T router, Nic nic); <T extends VirtualRouter> void collectNetworkStatistics(T router, Nic nic);
enum RouterHealthStatus{
SUCCESS, FAILED, WARNING, UNKNOWN;
}
} }

View File

@ -70,8 +70,6 @@ public interface AutoScaleService {
Counter createCounter(CreateCounterCmd cmd); Counter createCounter(CreateCounterCmd cmd);
Counter getCounter(long counterId);
boolean deleteCounter(long counterId) throws ResourceInUseException; boolean deleteCounter(long counterId) throws ResourceInUseException;
List<? extends Counter> listCounters(ListCountersCmd cmd); List<? extends Counter> listCounters(ListCountersCmd cmd);

View File

@ -23,7 +23,6 @@ import com.cloud.deploy.DeployDestination;
import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.IpAddress;
import com.cloud.network.Network; import com.cloud.network.Network;
import com.cloud.network.Network.Capability; import com.cloud.network.Network.Capability;
import com.cloud.network.Network.Provider; import com.cloud.network.Network.Provider;
@ -88,14 +87,6 @@ public interface NetworkElement extends Adapter {
boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) throws ConcurrentOperationException, boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) throws ConcurrentOperationException,
ResourceUnavailableException; ResourceUnavailableException;
/**
* Release IP from the network provider if reserved
* @param ipAddress
*/
default boolean releaseIp(IpAddress ipAddress) {
return true;
}
/** /**
* The network is being shutdown. * The network is being shutdown.
* @param network * @param network

View File

@ -17,40 +17,12 @@
package com.cloud.network.element; package com.cloud.network.element;
import java.util.List; import java.util.List;
import java.util.Objects;
import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.ResourceUnavailableException;
import com.cloud.network.Network; import com.cloud.network.Network;
import com.cloud.network.rules.FirewallRule;
import com.cloud.network.rules.PortForwardingRule; import com.cloud.network.rules.PortForwardingRule;
import com.cloud.network.vpc.NetworkACLItem;
public interface PortForwardingServiceProvider extends NetworkElement, IpDeployingRequester { public interface PortForwardingServiceProvider extends NetworkElement, IpDeployingRequester {
static String getPublicPortRange(PortForwardingRule rule) {
return Objects.equals(rule.getSourcePortStart(), rule.getSourcePortEnd()) ?
String.valueOf(rule.getSourcePortStart()) :
String.valueOf(rule.getSourcePortStart()).concat("-").concat(String.valueOf(rule.getSourcePortEnd()));
}
static String getPrivatePFPortRange(PortForwardingRule rule) {
return rule.getDestinationPortStart() == rule.getDestinationPortEnd() ?
String.valueOf(rule.getDestinationPortStart()) :
String.valueOf(rule.getDestinationPortStart()).concat("-").concat(String.valueOf(rule.getDestinationPortEnd()));
}
static String getPrivatePortRange(FirewallRule rule) {
return Objects.equals(rule.getSourcePortStart(), rule.getSourcePortEnd()) ?
String.valueOf(rule.getSourcePortStart()) :
String.valueOf(rule.getSourcePortStart()).concat("-").concat(String.valueOf(rule.getSourcePortEnd()));
}
static String getPrivatePortRangeForACLRule(NetworkACLItem rule) {
return Objects.equals(rule.getSourcePortStart(), rule.getSourcePortEnd()) ?
String.valueOf(rule.getSourcePortStart()) :
String.valueOf(rule.getSourcePortStart()).concat("-").concat(String.valueOf(rule.getSourcePortEnd()));
}
/** /**
* Apply rules * Apply rules
* @param network * @param network

View File

@ -55,8 +55,4 @@ public interface VpcProvider extends NetworkElement {
boolean applyACLItemsToPrivateGw(PrivateGateway gateway, List<? extends NetworkACLItem> rules) throws ResourceUnavailableException; boolean applyACLItemsToPrivateGw(PrivateGateway gateway, List<? extends NetworkACLItem> rules) throws ResourceUnavailableException;
boolean updateVpcSourceNatIp(Vpc vpc, IpAddress address); boolean updateVpcSourceNatIp(Vpc vpc, IpAddress address);
default boolean updateVpc(Vpc vpc, String previousVpcName) {
return true;
}
} }

View File

@ -215,8 +215,4 @@ public interface NetworkGuru extends Adapter {
default boolean isSlaacV6Only() { default boolean isSlaacV6Only() {
return true; return true;
} }
default boolean update(Network network, String prevNetworkName) {
return true;
}
} }

View File

@ -41,23 +41,13 @@ import com.cloud.utils.net.Ip;
public interface LoadBalancingRulesService { public interface LoadBalancingRulesService {
/** /**
* Create a load balancer rule from the given ipAddress/port to the given private port * Create a load balancer rule from the given ipAddress/port to the given private port
* @param xId an existing UUID for this rule (for instance a device generated one)
* @param name
* @param description
* @param srcPortStart
* @param srcPortEnd
* @param defPortStart
* @param defPortEnd
* @param ipAddrId
* @param protocol
* @param algorithm
* @param networkId
* @param lbOwnerId
* @param openFirewall * @param openFirewall
* @param lbProtocol * TODO
* @param forDisplay * @param forDisplay TODO
* @param cmd
* the command specifying the ip address, public port, protocol, private port, and algorithm
*
* @return the newly created LoadBalancerVO if successful, null otherwise * @return the newly created LoadBalancerVO if successful, null otherwise
* @throws NetworkRuleConflictException
* @throws InsufficientAddressCapacityException * @throws InsufficientAddressCapacityException
*/ */
LoadBalancer createPublicLoadBalancerRule(String xId, String name, String description, int srcPortStart, int srcPortEnd, int defPortStart, int defPortEnd, LoadBalancer createPublicLoadBalancerRule(String xId, String name, String description, int srcPortStart, int srcPortEnd, int defPortStart, int defPortEnd,
@ -108,7 +98,7 @@ public interface LoadBalancingRulesService {
/** /**
* Assign a virtual machine or list of virtual machines, or Map of <vmId vmIp> to a load balancer. * Assign a virtual machine or list of virtual machines, or Map of <vmId vmIp> to a load balancer.
*/ */
boolean assignToLoadBalancer(long lbRuleId, List<Long> vmIds, Map<Long, List<String>> vmIdIpMap, Map<Long, Long> vmIdNetworkMap, boolean isAutoScaleVM); boolean assignToLoadBalancer(long lbRuleId, List<Long> vmIds, Map<Long, List<String>> vmIdIpMap, boolean isAutoScaleVM);
boolean assignSSLCertToLoadBalancerRule(Long lbRuleId, String certName, String publicCert, String privateKey); boolean assignSSLCertToLoadBalancerRule(Long lbRuleId, String certName, String publicCert, String privateKey);
@ -116,7 +106,7 @@ public interface LoadBalancingRulesService {
boolean applyLoadBalancerConfig(long lbRuleId) throws ResourceUnavailableException; boolean applyLoadBalancerConfig(long lbRuleId) throws ResourceUnavailableException;
boolean assignCertToLoadBalancer(long lbRuleId, Long certId, boolean isForced); boolean assignCertToLoadBalancer(long lbRuleId, Long certId);
boolean removeCertFromLoadBalancer(long lbRuleId); boolean removeCertFromLoadBalancer(long lbRuleId);

View File

@ -1,41 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network.netris;
/**
 * Value object describing one load-balancer backend: the backend VM's id,
 * its guest IP address, and the service port traffic is forwarded to.
 *
 * Immutable: all state is assigned once in the constructor. The class had
 * no setters, so declaring the fields {@code final} is behavior-preserving.
 */
public class NetrisLbBackend {

    private final long vmId;
    private final String vmIp;
    private final int port;

    /**
     * @param vmId internal id of the backend VM
     * @param vmIp guest IP address of the backend VM
     * @param port service port on the backend VM
     */
    public NetrisLbBackend(long vmId, String vmIp, int port) {
        this.vmId = vmId;
        this.vmIp = vmIp;
        this.port = port;
    }

    public long getVmId() {
        return vmId;
    }

    public String getVmIp() {
        return vmIp;
    }

    public int getPort() {
        return port;
    }
}

View File

@ -1,108 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network.netris;
import com.cloud.network.SDNProviderNetworkRule;
import java.util.List;
/**
 * Netris-specific wrapper around a generic {@link SDNProviderNetworkRule},
 * adding the ACL action, load-balancer backend list, LB rule name/CIDR list,
 * and an optional human-readable reason.
 *
 * Immutable: all state is assigned once in the constructor (via the nested
 * {@link Builder}); the original class had no setters, so declaring the
 * fields {@code final} is behavior-preserving.
 */
public class NetrisNetworkRule {

    /** Action applied by a Netris ACL rule. */
    public enum NetrisRuleAction {
        PERMIT, DENY
    }

    private final SDNProviderNetworkRule baseRule;
    private final NetrisRuleAction aclAction;
    private final List<NetrisLbBackend> lbBackends;
    private final String lbRuleName;
    private final String lbCidrList;
    private final String reason;

    /**
     * Constructs a rule from the given builder; any builder field left unset
     * remains null on the resulting rule.
     */
    public NetrisNetworkRule(Builder builder) {
        this.baseRule = builder.baseRule;
        this.aclAction = builder.aclAction;
        this.lbBackends = builder.lbBackends;
        this.reason = builder.reason;
        this.lbCidrList = builder.lbCidrList;
        this.lbRuleName = builder.lbRuleName;
    }

    public NetrisRuleAction getAclAction() {
        return aclAction;
    }

    public List<NetrisLbBackend> getLbBackends() {
        return lbBackends;
    }

    public String getReason() {
        return reason;
    }

    public String getLbCidrList() {
        return lbCidrList;
    }

    public String getLbRuleName() {
        return lbRuleName;
    }

    public SDNProviderNetworkRule getBaseRule() {
        return baseRule;
    }

    /** Fluent builder for {@link NetrisNetworkRule}. */
    public static class Builder {
        private SDNProviderNetworkRule baseRule;
        private NetrisRuleAction aclAction;
        private List<NetrisLbBackend> lbBackends;
        private String reason;
        private String lbCidrList;
        private String lbRuleName;

        public Builder baseRule(SDNProviderNetworkRule baseRule) {
            this.baseRule = baseRule;
            return this;
        }

        public Builder aclAction(NetrisRuleAction aclAction) {
            this.aclAction = aclAction;
            return this;
        }

        public Builder lbBackends(List<NetrisLbBackend> lbBackends) {
            this.lbBackends = lbBackends;
            return this;
        }

        public Builder reason(String reason) {
            this.reason = reason;
            return this;
        }

        public Builder lbCidrList(String lbCidrList) {
            this.lbCidrList = lbCidrList;
            return this;
        }

        public Builder lbRuleName(String lbRuleName) {
            this.lbRuleName = lbRuleName;
            return this;
        }

        public NetrisNetworkRule build() {
            return new NetrisNetworkRule(this);
        }
    }
}

View File

@ -1,30 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network.netris;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
/**
 * Read-only view of a registered Netris SDN controller and the credentials /
 * scoping information CloudStack stores for it. Extends the generic identity
 * interfaces so the provider is addressable by both internal id and UUID.
 */
public interface NetrisProvider extends InternalIdentity, Identity {
    /** @return id of the zone this Netris controller is associated with */
    long getZoneId();
    /** @return display name of the provider registration */
    String getName();
    /** @return URL of the Netris controller endpoint */
    String getUrl();
    /** @return username used to authenticate against the controller */
    String getUsername();
    /** @return Netris site name this provider operates within */
    String getSiteName();
    /** @return Netris tenant (admin) name used for created resources */
    String getTenantName();
    /** @return tag applied to resources CloudStack creates in Netris */
    String getNetrisTag();
}

Some files were not shown because too many files have changed in this diff Show More