mirror of https://github.com/apache/cloudstack.git
CLOUDSTACK-7143: more robust box cleanup
Having experimented with many edge cases of running multiple build.sh commands in parallel / against busy VirtualBox setups, the only really reliable way to produce consistent images is to not run these commands in parallel and to not run them while the machine is busy with other work. If VirtualBox or the machine hosting it is very busy, and/or it has a lot of disks it knows (or knew) about, and/or it's Tuesday, behavior may differ a bit. Accepting this reality, this commit adds scripts that try hard to put VirtualBox back into a known, healthy state before building.
parent d658fc4637
commit be8b2d7c21
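Conceptually, the reset amounts to powering off and unregistering every VM and closing/deleting every registered disk. A rough sketch of that idea in plain VBoxManage calls (illustrative only; the committed vbox_vm_clean.rb / vbox_disk_clean.rb below do the same thing more carefully, including killing leftover VBoxHeadless processes):

    #!/usr/bin/env bash
    # Illustrative sketch: assumes VBoxManage is on PATH and that every
    # registered VM and disk belonging to this user may be destroyed.
    for uuid in $(VBoxManage list vms | grep -o '{[^}]*}' | tr -d '{}'); do
      VBoxManage controlvm "${uuid}" poweroff || true
      VBoxManage unregistervm "${uuid}" --delete || true
    done
    for uuid in $(VBoxManage list hdds | awk '/^UUID:/ {print $2}'); do
      VBoxManage closemedium disk "${uuid}" --delete || true
    done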
Gemfile
@@ -18,3 +18,4 @@
source 'https://rubygems.org'
gem 'veewee', :git => 'https://github.com/jedi4ever/veewee.git'
gem 'em-winrm'
gem 'sys-proctable'
build.sh
@@ -37,6 +37,10 @@ Usage:
(or use command line arg, default i386, other option amd64)
* Set \$ssh_key to provide root ssh public key to inject
(or use command line arg; the default is set in the veewee definition's authorized_keys.sh)
* Set \$clean_vbox to try pretty hard to remove all our vms and disks from
virtualbox before and after running the rest of the build. This should
not be needed since we try hard to use VBoxManage nicely, but various
error conditions / timing issues are quite hard to fully contain
* Set \$DEBUG=1 to enable debug logging
* Set \$TRACE=1 to enable trace logging
* Set \$VEEWEE_ARGS to pass veewee custom arguments
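For example (illustrative; all positional arguments are left at the defaults described above), a run that resets VirtualBox before and after the build, with debug logging, could be started with environment variables only:

    clean_vbox=1 DEBUG=1 ./build.sh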
@@ -115,6 +119,9 @@ export VM_ARCH="${arch}"
# server control
ssh_key="${6:-${ssh_key:-}}"

# whether to attempt to clean up all our virtualbox vms/disks before/after run
clean_vbox="${clean_vbox:-}"

# while building with vbox, we need a quite unique appliance name in order to prevent conflicts with multiple
# concurrent executors on jenkins
if [ -z "${branch}" ] ; then
@@ -266,6 +273,17 @@ function setup_ruby() {
  bundle check || bundle install ${bundle_args}
}

function stop_vbox() {
  log INFO "stopping all virtualbox vms for ${USER}"
  bundle exec ./vbox_vm_clean.rb
}

function clean_vbox() {
  log INFO "deleting all virtualbox vms and disks for ${USER}"
  bundle exec ./vbox_vm_clean.rb --delete
  bundle exec ./vbox_disk_clean.rb
}

function prepare() {
  log INFO "preparing for build"
  setup_ruby
@@ -283,9 +301,11 @@ function veewee_destroy() {
function veewee_build() {
  log INFO "building new image with veewee"
  bundle exec veewee vbox build "${appliance_build_name}" ${VEEWEE_BUILD_ARGS}
  # vbox export wants to run vbox halt itself, so don't halt!
  # bundle exec veewee vbox halt "${appliance_build_name}" ${VEEWEE_ARGS}
  bundle exec veewee vbox export "${appliance_build_name}" ${VEEWEE_ARGS}
}

function veewee_halt() {
  log INFO "shutting down new vm with veewee"
  bundle exec veewee vbox halt "${appliance_build_name}" ${VEEWEE_ARGS}
}

function check_appliance_shutdown() {
@@ -300,6 +320,41 @@ function check_appliance_shutdown() {
  return ${result}
}

function check_appliance_disk_ready() {
  log INFO "waiting for veewee appliance disk to be available..."
  # local hdd_path="vboxmanage showvminfo '${appliance_build_name}' --machinereadable | \
  #   egrep '(SATA|IDE) Controller-[0-9]+-[0-9]+' | grep -v '.iso' | \
  #   grep -v '="none"' | egrep -o '=".*"' | sed 's/=//' | sed 's/"//g'"
  local hdd_path=`vboxmanage list hdds | grep "${appliance_build_name}\/" | grep vdi | \
      cut -c 14- | sed ${sed_regex_option} 's/^ *//'`
  disk_state=`vboxmanage showhdinfo "${hdd_path}" | egrep '^State:' | sed 's/State://' | egrep -o '[a-zA-Z]+' | awk '{print tolower($0)}'`
  if [ "${disk_state}" == "notcreated" ]; then
    log ERROR "disk ${hdd_path} in state notcreated"
    return 1
  elif [ "${disk_state}" == "created" ]; then
    log INFO "disk ${hdd_path} in state created"
    return 0
  elif [ "${disk_state}" == "lockedread" ]; then
    log INFO "disk ${hdd_path} in state lockedread"
    return 1
  elif [ "${disk_state}" == "lockedwrite" ]; then
    log INFO "disk ${hdd_path} in state lockedwrite"
    return 1
  elif [ "${disk_state}" == "inaccessible" ]; then
    log INFO "disk ${hdd_path} in state inaccessible"
    return 1
  elif [ "${disk_state}" == "creating" ]; then
    log WARN "disk ${hdd_path} in state creating"
    return 1
  elif [ "${disk_state}" == "deleting" ]; then
    log WARN "disk ${hdd_path} in state deleting"
    return 1
  else
    log WARN "disk ${hdd_path} has unknown disk state ${disk_state}"
    return 1
  fi
}

function remove_shares() {
  log INFO "removing shared folders from appliance..."
  set +e
@@ -407,12 +462,21 @@ function hyperv_export() {
###

function main() {
  if [ "${clean_vbox}" == "1" ]; then
    clean_vbox --delete
    add_on_exit clean_vbox --delete
  else
    stop_vbox # some extra encouragement for virtualbox to stop things
  fi
  prepare
  create_definition
  veewee_destroy # in case of left-over cruft from failed build
  add_on_exit veewee_destroy
  veewee_build
  veewee_halt
  stop_vbox # some extra encouragement for virtualbox to stop things
  retry 10 check_appliance_shutdown
  retry 10 check_appliance_disk_ready
  retry 10 remove_shares

  # Get appliance uuids
@@ -427,7 +491,7 @@ function main() {
  kvm_export "${hdd_path}"
  vmware_export "${machine_uuid}" "${hdd_uuid}"
  hyperv_export "${hdd_uuid}"
  log INFO "BUILD SUCCESSFUL"
  add_on_exit log INFO "BUILD SUCCESSFUL"
}

# we only run main() if not source-d
vbox_disk_clean.rb (new file)
@@ -0,0 +1,33 @@
#!/usr/bin/env ruby

lines = `VBoxManage list hdds`
disks = lines.split(/\n\s*\n/)
disks.each do |disk|
  disk_lines = disk.split(/\n/)
  disk_config = {}
  disk_lines.each do |line|
    pair = line.split(/:\s*/)
    disk_config[pair[0]] = pair[1]
    # if pair[0] == 'Location'
    #   location = pair[1]

    #   if location.include? '/Snapshots/'
    #     disk_config['is_snapshot'] = true
    #   end
    #   if location.include? '/VirtualBox VMs/'
    #     disk_config['vm_name'] = location.split('/VirtualBox VMs/')[1].split('/')[0]
    #     disk_config['disk_name'] = location.split('/')[-1]
    #     disk_config['is_virtualbox_vm'] = true
    #   else
    #     disk_config['is_virtualbox_vm'] = false
    #     disk_config['disk_name'] = location.split('/')[-1]
    #   end
    # end
  end

  if disk_config.include? 'Location'
    cmd="VBoxManage closemedium disk '#{disk_config['Location']}' --delete"
    puts cmd
    `#{cmd}`
  end
end
vbox_vm_clean.rb (new file)
@@ -0,0 +1,51 @@
#!/usr/bin/env ruby

# script that tries hard to forcibly shut down all vms

# gem install sys-proctable
require 'sys/proctable'

include Sys

do_delete = (ARGV.include? 'delete' or ARGV.include? '--delete' or ARGV.include? '-d')

lines = `VBoxManage list vms`
vms = lines.split(/\n/)
vms.each do |vmline|
  vm_info = /\"(.*)\"[^{]*\{(.*)\}/.match(vmline)
  vm_name = vm_info[1]
  vm_uuid = vm_info[2]

  cmd="VBoxManage controlvm #{vm_name} poweroff"
  puts cmd
  `#{cmd}`
  if do_delete
    sleep(1)
    cmd="VBoxManage unregistervm #{vm_name} --delete"
    puts cmd
    `#{cmd}`
  end

  sleep(1)
  # ps x | grep VBoxHeadless | grep systemvm64template-4.4.0 | egrep -o '^\s*[0-9]+' | xargs kill
  ProcTable.ps { |p|
    next unless p.cmdline.include? "VBoxHeadless"
    next unless p.cmdline.include? vm_name
    # VBoxManage should only list _our_ vms, but just to be safe...
    next unless p.ruid == Process.uid

    puts "kill -SIGKILL #{p.pid}"
    begin
      Process.kill("KILL", p.pid)
    rescue => exception
      puts exception.backtrace
    end
    sleep(5)
    puts "kill -SIGTERM #{p.pid}"
    begin
      Process.kill("TERM", p.pid)
    rescue => exception
      puts exception.backtrace
    end
  }
end
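Both helpers can also be run by hand when a builder host is in a bad state; assuming the Gemfile above has been installed with bundler in the same directory, the calls mirror what build.sh's clean_vbox() does:

    bundle exec ./vbox_vm_clean.rb --delete
    bundle exec ./vbox_disk_clean.rb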