From 7cac5aa9fc1ad8e8aafd9189020d594059625db3 Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Mon, 16 Dec 2013 15:44:38 +0530 Subject: [PATCH 001/312] CLOUDSTACK-5514. Response of listAccounts API call includes removed users --- server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java index 42a1b0f1c4a..923a2382fdd 100644 --- a/server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java @@ -107,7 +107,7 @@ public class UserAccountJoinDaoImpl extends GenericDaoBase searchByAccountId(Long accountId) { SearchCriteria sc = vrAcctIdSearch.create(); sc.setParameters("accountId", accountId); - return searchIncludingRemoved(sc, null, null, false); + return search(sc, null, null, false); } } From 3a3fec3cb6bb4f9a008370ea02279d286654b01a Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Mon, 16 Dec 2013 17:50:08 +0530 Subject: [PATCH 002/312] CLOUDSTACK-5145 : Added permission checks while deleting network ACLs Conflicts: server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java --- .../cloud/network/vpc/NetworkACLServiceImpl.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java index fbcd461b759..f316999d4ea 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -579,8 +579,21 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ @Override public boolean revokeNetworkACLItem(long ruleId) { NetworkACLItemVO aclItem = _networkACLItemDao.findById(ruleId); +<<<<<<< HEAD if (aclItem != null) { if ((aclItem.getAclId() == NetworkACL.DEFAULT_ALLOW) || 
(aclItem.getAclId() == NetworkACL.DEFAULT_DENY)) { +======= + if(aclItem != null){ + NetworkACL acl = _networkAclMgr.getNetworkACL(aclItem.getAclId()); + + Vpc vpc = _entityMgr.findById(Vpc.class, acl.getVpcId()); + + Account caller = CallContext.current().getCallingAccount(); + + _accountMgr.checkAccess(caller, null, true, vpc); + + if((aclItem.getAclId() == NetworkACL.DEFAULT_ALLOW) || (aclItem.getAclId() == NetworkACL.DEFAULT_DENY)){ +>>>>>>> e2805b8... CLOUDSTACK-5145 : Added permission checks while deleting network ACLs throw new InvalidParameterValueException("ACL Items in default ACL cannot be deleted"); } } From e2915c6ce57cf8a429a2c8a9b1980faf1c4f8574 Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Mon, 16 Dec 2013 18:12:44 +0530 Subject: [PATCH 003/312] CLOUDSTACK-5145 : Added permission checks while deleting network ACLs --- server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java index f316999d4ea..a95ef1a05fd 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -579,10 +579,6 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ @Override public boolean revokeNetworkACLItem(long ruleId) { NetworkACLItemVO aclItem = _networkACLItemDao.findById(ruleId); -<<<<<<< HEAD - if (aclItem != null) { - if ((aclItem.getAclId() == NetworkACL.DEFAULT_ALLOW) || (aclItem.getAclId() == NetworkACL.DEFAULT_DENY)) { -======= if(aclItem != null){ NetworkACL acl = _networkAclMgr.getNetworkACL(aclItem.getAclId()); @@ -593,7 +589,6 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ _accountMgr.checkAccess(caller, null, true, vpc); if((aclItem.getAclId() == NetworkACL.DEFAULT_ALLOW) || (aclItem.getAclId() == NetworkACL.DEFAULT_DENY)){ ->>>>>>> 
e2805b8... CLOUDSTACK-5145 : Added permission checks while deleting network ACLs throw new InvalidParameterValueException("ACL Items in default ACL cannot be deleted"); } } From c6c299523151345bbc3c97614c8ac995676e229b Mon Sep 17 00:00:00 2001 From: Murali Reddy Date: Mon, 16 Dec 2013 19:00:29 +0530 Subject: [PATCH 004/312] CLOUDSTACK-4616: When system Vms fail to start when host is down , link local Ip addresses do not get released resulting in all the link local Ip addresses being consumed eventually. fix ensure Nics with reservation strategy 'Start' should go through release phase in the Nic life cycle so that release is performed before Nic is removed to avoid resource leaks. --- .../engine/orchestration/NetworkOrchestrator.java | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index cbe02ff0941..1505d4166d2 100755 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -1517,6 +1517,17 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } protected void removeNic(VirtualMachineProfile vm, NicVO nic) { + + if (nic.getReservationStrategy() == Nic.ReservationStrategy.Start && nic.getState() != Nic.State.Allocated) { + // Nics with reservation strategy 'Start' should go through release phase in the Nic life cycle. + // Ensure that release is performed before Nic is to be removed to avoid resource leaks. 
+ try { + releaseNic(vm, nic.getId()); + } catch (Exception ex) { + s_logger.warn("Failed to release nic: " + nic.toString() + " as part of remove operation due to", ex ); + } + } + nic.setState(Nic.State.Deallocating); _nicDao.update(nic.getId(), nic); NetworkVO network = _networksDao.findById(nic.getNetworkId()); From 12adbffbea77b6a21aa8350d0b0effd1c7fb9702 Mon Sep 17 00:00:00 2001 From: Murali Reddy Date: Mon, 16 Dec 2013 19:34:32 +0530 Subject: [PATCH 005/312] CLOUDSTACK-5517: NPE observed during "release portable IPs" as part of account cleanup ensure proper portable ip address are released as part of account cleanup --- server/src/com/cloud/user/AccountManagerImpl.java | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index 8504ee11705..20a6242cfd6 100755 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -750,11 +750,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // release account specific acquired portable IP's. Since all the portable IP's must have been already // disassociated with VPC/guest network (due to deletion), so just mark portable IP as free. 
- List portableIpsToRelease = _ipAddressDao.listByAccount(accountId); - for (IpAddress ip : portableIpsToRelease) { - s_logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); - _ipAddrMgr.releasePortableIpAddress(ip.getId()); + List ipsToRelease = _ipAddressDao.listByAccount(accountId); + for (IpAddress ip : ipsToRelease) { + if (ip.isPortable()) { + s_logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); + _ipAddrMgr.releasePortableIpAddress(ip.getId()); + } } + // release dedication if any List dedicatedResources = _dedicatedDao.listByAccountId(accountId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { From 6ad0e4913eedffd87636e94ccf607565d96f31d8 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Mon, 16 Dec 2013 14:00:37 -0800 Subject: [PATCH 006/312] CLOUDSTACK-5276: Remove wrong select column from LB/PF list select --- ui/css/cloudstack3.css | 14 -------------- ui/scripts/ui/widgets/multiEdit.js | 2 ++ 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 7b1dde0ab9f..b06b79a795a 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -7569,20 +7569,6 @@ div.container div.panel div#details-tab-addloadBalancer.detail-group div.loadBal min-width: 100px; } -div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody tr.even td { - border-right: 1px solid #BFBFBF; - clear: none; - color: #495A76; - font-size: 10px; - margin-right: 25px; - min-width: -moz-available; - max-width: 90px; - overflow: hidden; - padding: 9px 5px 8px 0; - position: relative; - vertical-align: middle; -} - .multi-edit { overflow: auto; } diff --git a/ui/scripts/ui/widgets/multiEdit.js b/ui/scripts/ui/widgets/multiEdit.js index 79bb0d4f869..873775d7221 100755 --- a/ui/scripts/ui/widgets/multiEdit.js +++ b/ui/scripts/ui/widgets/multiEdit.js @@ -515,6 +515,8 @@ uiCustom: true }); + 
instances.listView.multiSelect = false; + instances.listView.actions = { select: { label: 'Select instance', From 2f53295151820c56c683ed280691ebd479d25ec2 Mon Sep 17 00:00:00 2001 From: Marcus Sorensen Date: Mon, 16 Dec 2013 16:12:43 -0700 Subject: [PATCH 007/312] CLOUDSTACK-5521: Create multi-core topology when deploying KVM virtual machines with many cores --- .../resource/LibvirtComputingResource.java | 11 +- .../hypervisor/kvm/resource/LibvirtVMDef.java | 31 +++- .../LibvirtComputingResourceTest.java | 146 +++++++++++++++++- 3 files changed, 179 insertions(+), 9 deletions(-) diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 7a02f3b5bdc..a2c66829d56 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -3477,12 +3477,21 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } else { grd.setMemorySize(vmTO.getMaxRam() / 1024); } - grd.setVcpuNum(vmTO.getCpus()); + int vcpus = vmTO.getCpus(); + grd.setVcpuNum(vcpus); vm.addComp(grd); CpuModeDef cmd = new CpuModeDef(); cmd.setMode(_guestCpuMode); cmd.setModel(_guestCpuModel); + // multi cores per socket, for larger core configs + if (vcpus % 6 == 0) { + int sockets = vcpus / 6; + cmd.setTopology(6, sockets); + } else if (vcpus % 4 == 0) { + int sockets = vcpus / 4; + cmd.setTopology(4, sockets); + } vm.addComp(cmd); if (_hypervisorLibvirtVersion >= 9000) { diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index 3e44e941490..9fd058fa3eb 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ 
b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -949,6 +949,8 @@ public class LibvirtVMDef { public static class CpuModeDef { private String _mode; private String _model; + private int _coresPerSocket = -1; + private int _sockets = -1; public void setMode(String mode) { _mode = mode; @@ -958,17 +960,34 @@ public class LibvirtVMDef { _model = model; } + public void setTopology(int coresPerSocket, int sockets) { + _coresPerSocket = coresPerSocket; + _sockets = sockets; + } + @Override public String toString() { - StringBuilder modeBuidler = new StringBuilder(); - if ("custom".equalsIgnoreCase(_mode) && _model != null) { - modeBuidler.append("" + _model + ""); + StringBuilder modeBuilder = new StringBuilder(); + + // start cpu def, adding mode, model + if ("custom".equalsIgnoreCase(_mode) && _model != null){ + modeBuilder.append("" + _model + ""); } else if ("host-model".equals(_mode)) { - modeBuidler.append(""); + modeBuilder.append(""); } else if ("host-passthrough".equals(_mode)) { - modeBuidler.append(""); + modeBuilder.append(""); + } else { + modeBuilder.append(""); } - return modeBuidler.toString(); + + // add topology + if (_sockets > 0 && _coresPerSocket > 0) { + modeBuilder.append(""); + } + + // close cpu def + modeBuilder.append(""); + return modeBuilder.toString(); } } diff --git a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index 7b1eade7c47..c8af7f8b82b 100644 --- a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -69,7 +69,7 @@ public class LibvirtComputingResourceTest { int id = _random.nextInt(65534); String name = "test-instance-1"; - int cpus = _random.nextInt(7) + 1; + int cpus = _random.nextInt(2) + 1; int 
speed = 1024; int minRam = 256 * 1024; int maxRam = 512 * 1024; @@ -124,6 +124,7 @@ public class LibvirtComputingResourceTest { //vmStr += "\n"; //vmStr += "" + (cpus * speed) + "\n"; //vmStr += "\n"; + vmStr += ""; vmStr += "restart\n"; vmStr += "destroy\n"; vmStr += "destroy\n"; @@ -131,6 +132,146 @@ public class LibvirtComputingResourceTest { assertEquals(vmStr, vm.toString()); } + /** + This test verifies that CPU topology is properly set for hex-core + */ + @Test + public void testCreateVMFromSpecWithTopology6() { + int id = _random.nextInt(65534); + String name = "test-instance-1"; + + int cpus = 12; + int minSpeed = 1024; + int maxSpeed = 2048; + int minRam = 256 * 1024; + int maxRam = 512 * 1024; + + String os = "Ubuntu"; + boolean haEnabled = false; + boolean limitCpuUse = false; + + String vncAddr = ""; + String vncPassword = "mySuperSecretPassword"; + + LibvirtComputingResource lcr = new LibvirtComputingResource(); + VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword); + to.setVncAddr(vncAddr); + to.setUuid("b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9"); + + LibvirtVMDef vm = lcr.createVMFromSpec(to); + vm.setHvsType(_hyperVisorType); + + String vmStr = "\n"; + vmStr += "" + name + "\n"; + vmStr += "b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9\n"; + vmStr += "" + os + "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + maxRam / 1024 + "\n"; + vmStr += "" + minRam / 1024 + "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + cpus + "\n"; + vmStr += "\n"; + vmStr += "hvm\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += ""; + vmStr += "restart\n"; + vmStr += 
"destroy\n"; + vmStr += "destroy\n"; + vmStr += "\n"; + + assertEquals(vmStr, vm.toString()); + } + + /** + This test verifies that CPU topology is properly set for quad-core + */ + @Test + public void testCreateVMFromSpecWithTopology4() { + int id = _random.nextInt(65534); + String name = "test-instance-1"; + + int cpus = 8; + int minSpeed = 1024; + int maxSpeed = 2048; + int minRam = 256 * 1024; + int maxRam = 512 * 1024; + + String os = "Ubuntu"; + boolean haEnabled = false; + boolean limitCpuUse = false; + + String vncAddr = ""; + String vncPassword = "mySuperSecretPassword"; + + LibvirtComputingResource lcr = new LibvirtComputingResource(); + VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword); + to.setVncAddr(vncAddr); + to.setUuid("b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9"); + + LibvirtVMDef vm = lcr.createVMFromSpec(to); + vm.setHvsType(_hyperVisorType); + + String vmStr = "\n"; + vmStr += "" + name + "\n"; + vmStr += "b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9\n"; + vmStr += "" + os + "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + maxRam / 1024 + "\n"; + vmStr += "" + minRam / 1024 + "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + cpus + "\n"; + vmStr += "\n"; + vmStr += "hvm\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += ""; + vmStr += "restart\n"; + vmStr += "destroy\n"; + vmStr += "destroy\n"; + vmStr += "\n"; + + assertEquals(vmStr, vm.toString()); + } + /** This test tests if the Agent can handle a vmSpec coming from a >4.1 management server. 
@@ -143,7 +284,7 @@ public class LibvirtComputingResourceTest { int id = _random.nextInt(65534); String name = "test-instance-1"; - int cpus = _random.nextInt(7) + 1; + int cpus = _random.nextInt(2) + 1; int minSpeed = 1024; int maxSpeed = 2048; int minRam = 256 * 1024; @@ -200,6 +341,7 @@ public class LibvirtComputingResourceTest { //vmStr += "\n"; //vmStr += "" + (cpus * minSpeed) + "\n"; //vmStr += "\n"; + vmStr += ""; vmStr += "restart\n"; vmStr += "destroy\n"; vmStr += "destroy\n"; From 9d3827e6fe48b23639392bfe25c4d3bd2c083eac Mon Sep 17 00:00:00 2001 From: Kelven Yang Date: Fri, 13 Dec 2013 17:25:56 -0800 Subject: [PATCH 008/312] CLOUDSTACK-669: refactor VM work job dispatcher to allow volume/snapshot manager to participate serialized job handling --- .../com/cloud/vm/VirtualMachineManager.java | 41 +- .../src/com/cloud/vm/VmWork.java | 15 +- ...ring-engine-orchestration-core-context.xml | 7 +- .../cloud/vm/VirtualMachineManagerImpl.java | 822 ++++++++++-------- .../com/cloud/vm/VmWorkAddVmToNetwork.java | 36 +- .../src/com/cloud/vm/VmWorkJobDispatcher.java | 118 +-- .../src/com/cloud/vm/VmWorkJobHandler.java | 26 + .../src/com/cloud/vm/VmWorkMigrate.java | 5 +- .../com/cloud/vm/VmWorkMigrateForScale.java | 28 +- .../cloud/vm/VmWorkMigrateWithStorage.java | 32 +- .../src/com/cloud/vm/VmWorkReboot.java | 16 +- .../src/com/cloud/vm/VmWorkReconfigure.java | 28 +- .../com/cloud/vm/VmWorkRemoveNicFromVm.java | 14 +- .../cloud/vm/VmWorkRemoveVmFromNetwork.java | 36 +- .../src/com/cloud/vm/VmWorkStart.java | 136 +-- .../src/com/cloud/vm/VmWorkStop.java | 12 +- .../com/cloud/vm/VmWorkStorageMigration.java | 10 +- .../spring-server-core-managers-context.xml | 10 +- 18 files changed, 703 insertions(+), 689 deletions(-) create mode 100644 engine/orchestration/src/com/cloud/vm/VmWorkJobHandler.java diff --git a/engine/api/src/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/com/cloud/vm/VirtualMachineManager.java index 80497b1dd5f..05cbfc2cb7f 100644 --- 
a/engine/api/src/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/com/cloud/vm/VirtualMachineManager.java @@ -99,13 +99,8 @@ public interface VirtualMachineManager extends Manager { void advanceStart(String vmUuid, Map params, DeploymentPlan planToDeploy) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, OperationTimedoutException; - void orchestrateStart(String vmUuid, Map params, DeploymentPlan planToDeploy) throws InsufficientCapacityException, - ResourceUnavailableException, ConcurrentOperationException, OperationTimedoutException; - void advanceStop(String vmUuid, boolean cleanupEvenIfUnableToStop) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException; - void orchestrateStop(String vmUuid, boolean cleanupEvenIfUnableToStop) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException; - void advanceExpunge(String vmUuid) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException; void destroy(String vmUuid) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException; @@ -113,19 +108,11 @@ public interface VirtualMachineManager extends Manager { void migrateAway(String vmUuid, long hostId) throws InsufficientServerCapacityException; void migrate(String vmUuid, long srcHostId, DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException; - - void orchestrateMigrate(String vmUuid, long srcHostId, DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException; - - void migrateWithStorage(String vmUuid, long srcId, long destId, Map volumeToPool) throws ResourceUnavailableException, - ConcurrentOperationException; - - void orchestrateMigrateWithStorage(String vmUuid, long srcId, long destId, Map volumeToPool) throws ResourceUnavailableException, - ConcurrentOperationException; - + + void migrateWithStorage(String 
vmUuid, long srcId, long destId, Map volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException; + void reboot(String vmUuid, Map params) throws InsufficientCapacityException, ResourceUnavailableException; - void orchestrateReboot(String vmUuid, Map params) throws InsufficientCapacityException, ResourceUnavailableException; - void advanceReboot(String vmUuid, Map params) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, OperationTimedoutException; @@ -142,8 +129,6 @@ public interface VirtualMachineManager extends Manager { void storageMigration(String vmUuid, StoragePool storagePoolId); - void orchestrateStorageMigration(String vmUuid, StoragePool storagePoolId); - /** * @param vmInstance * @param newServiceOffering @@ -166,12 +151,9 @@ public interface VirtualMachineManager extends Manager { * @throws ResourceUnavailableException * @throws InsufficientCapacityException */ - NicProfile addVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException, ResourceUnavailableException, - InsufficientCapacityException; - - NicProfile orchestrateAddVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException, + NicProfile addVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; - + /** * @param vm * @param nic @@ -181,8 +163,6 @@ public interface VirtualMachineManager extends Manager { */ boolean removeNicFromVm(VirtualMachine vm, Nic nic) throws ConcurrentOperationException, ResourceUnavailableException; - boolean orchestrateRemoveNicFromVm(VirtualMachine vm, Nic nic) throws ConcurrentOperationException, ResourceUnavailableException; - /** * @param vm * @param network @@ -192,9 +172,6 @@ public interface VirtualMachineManager extends Manager { * @throws ConcurrentOperationException */ boolean 
removeVmFromNetwork(VirtualMachine vm, Network network, URI broadcastUri) throws ConcurrentOperationException, ResourceUnavailableException; - - boolean orchestrateRemoveVmFromNetwork(VirtualMachine vm, Network network, URI broadcastUri) throws ConcurrentOperationException, ResourceUnavailableException; - /** * @param nic * @param hypervisorType @@ -210,16 +187,8 @@ public interface VirtualMachineManager extends Manager { VirtualMachineTO toVmTO(VirtualMachineProfile profile); VirtualMachine reConfigureVm(String vmUuid, ServiceOffering newServiceOffering, boolean sameHost) throws ResourceUnavailableException, ConcurrentOperationException; - - VirtualMachine orchestrateReConfigureVm(String vmUuid, ServiceOffering newServiceOffering, boolean sameHost) throws ResourceUnavailableException, - ConcurrentOperationException; - void findHostAndMigrate(String vmUuid, Long newSvcOfferingId, DeploymentPlanner.ExcludeList excludeHostList) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException; void migrateForScale(String vmUuid, long srcHostId, DeployDestination dest, Long newSvcOfferingId) throws ResourceUnavailableException, ConcurrentOperationException; - - void orchestrateMigrateForScale(String vmUuid, long srcHostId, DeployDestination dest, Long newSvcOfferingId) throws ResourceUnavailableException, - ConcurrentOperationException; - } diff --git a/engine/components-api/src/com/cloud/vm/VmWork.java b/engine/components-api/src/com/cloud/vm/VmWork.java index 751db997d90..98e46a3f3a6 100644 --- a/engine/components-api/src/com/cloud/vm/VmWork.java +++ b/engine/components-api/src/com/cloud/vm/VmWork.java @@ -25,11 +25,14 @@ public class VmWork implements Serializable { long accountId; long vmId; - public VmWork(long userId, long accountId, long vmId) { + String handlerName; + + public VmWork(long userId, long accountId, long vmId, String handlerName) { this.userId = userId; this.accountId = accountId; this.vmId = vmId; - } + 
this.handlerName = handlerName; + } public long getUserId() { return userId; @@ -39,7 +42,11 @@ public class VmWork implements Serializable { return accountId; } - public long getVmId() { - return vmId; + public long getVmId() { + return vmId; + } + + public String getHandlerName() { + return handlerName; } } diff --git a/engine/orchestration/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml b/engine/orchestration/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml index fc3bae3129f..49cb9cdf3af 100644 --- a/engine/orchestration/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml +++ b/engine/orchestration/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml @@ -60,7 +60,6 @@ - + + + + + + diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index 7201569b970..f89d83c1da6 100755 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -205,9 +205,11 @@ import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Local(value = VirtualMachineManager.class) -public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, Listener, Configurable { +public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, VmWorkJobHandler, Listener, Configurable { private static final Logger s_logger = Logger.getLogger(VirtualMachineManagerImpl.class); + public static final String VM_WORK_JOB_HANDLER = VirtualMachineManagerImpl.class.getSimpleName(); + private static final String VM_SYNC_ALERT_SUBJECT = "VM state sync alert"; @Inject @@ -270,6 +272,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac protected AffinityGroupVMMapDao _affinityGroupVMMapDao; 
@Inject protected EntityManager _entityMgr; + @Inject ConfigDepot _configDepot; @@ -731,12 +734,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - - @Override - public void orchestrateStart(String vmUuid, Map params, DeploymentPlan planToDeploy) throws InsufficientCapacityException, - ConcurrentOperationException, ResourceUnavailableException { - - CallContext cctxt = CallContext.current(); + + private void orchestrateStart(String vmUuid, Map params, DeploymentPlan planToDeploy) throws InsufficientCapacityException, + ConcurrentOperationException, ResourceUnavailableException { + + CallContext cctxt = CallContext.current(); Account account = cctxt.getCallingAccount(); User caller = cctxt.getCallingUser(); @@ -1254,9 +1256,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - @Override - public void orchestrateStop(String vmUuid, boolean cleanUpEvenIfUnableToStop) throws AgentUnavailableException, OperationTimedoutException, - ConcurrentOperationException { + private void orchestrateStop(String vmUuid, boolean cleanUpEvenIfUnableToStop) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); advanceStop(vm, cleanUpEvenIfUnableToStop); @@ -1542,8 +1542,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - @Override - public void orchestrateStorageMigration(String vmUuid, StoragePool destPool) { + private void orchestrateStorageMigration(String vmUuid, StoragePool destPool) { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); try { @@ -1628,9 +1627,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - - @Override - public void orchestrateMigrate(String vmUuid, long srcHostId, DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException { + + private void orchestrateMigrate(String vmUuid, long srcHostId, 
DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null) { if (s_logger.isDebugEnabled()) { @@ -1898,11 +1896,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - - @Override - public void orchestrateMigrateWithStorage(String vmUuid, long srcHostId, long destHostId, Map volumeToPool) throws ResourceUnavailableException, - ConcurrentOperationException { - + + private void orchestrateMigrateWithStorage(String vmUuid, long srcHostId, long destHostId, Map volumeToPool) throws ResourceUnavailableException, + ConcurrentOperationException { + VMInstanceVO vm = _vmDao.findByUuid(vmUuid); HostVO srcHost = _hostDao.findById(srcHostId); @@ -2180,10 +2177,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - - @Override - public void orchestrateReboot(String vmUuid, Map params) throws InsufficientCapacityException, ConcurrentOperationException, - ResourceUnavailableException { + + private void orchestrateReboot(String vmUuid, Map params) throws InsufficientCapacityException, ConcurrentOperationException, + ResourceUnavailableException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); DataCenter dc = _entityMgr.findById(DataCenter.class, vm.getDataCenterId()); @@ -3112,11 +3108,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - - @Override - public NicProfile orchestrateAddVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException, - ResourceUnavailableException, - InsufficientCapacityException { + + private NicProfile orchestrateAddVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException, ResourceUnavailableException, + InsufficientCapacityException { CallContext cctx = CallContext.current(); s_logger.debug("Adding vm " + vm + " to network " + network + "; requested 
nic profile " + requested); @@ -3221,8 +3215,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - @Override - public boolean orchestrateRemoveNicFromVm(VirtualMachine vm, Nic nic) throws ConcurrentOperationException, ResourceUnavailableException { + private boolean orchestrateRemoveNicFromVm(VirtualMachine vm, Nic nic) throws ConcurrentOperationException, ResourceUnavailableException { CallContext cctx = CallContext.current(); VMInstanceVO vmVO = _vmDao.findById(vm.getId()); NetworkVO network = _networkDao.findById(nic.getNetworkId()); @@ -3287,10 +3280,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // TODO will serialize on the VM object later to resolve operation conflicts return orchestrateRemoveVmFromNetwork(vm, network, broadcastUri); } - - @Override @DB - public boolean orchestrateRemoveVmFromNetwork(VirtualMachine vm, Network network, URI broadcastUri) throws ConcurrentOperationException, ResourceUnavailableException { + private boolean orchestrateRemoveVmFromNetwork(VirtualMachine vm, Network network, URI broadcastUri) throws ConcurrentOperationException, ResourceUnavailableException { CallContext cctx = CallContext.current(); VMInstanceVO vmVO = _vmDao.findById(vm.getId()); ReservationContext context = new ReservationContextImpl(null, null, cctx.getCallingUser(), cctx.getCallingAccount()); @@ -3456,11 +3447,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - @Override - public void orchestrateMigrateForScale(String vmUuid, long srcHostId, DeployDestination dest, Long oldSvcOfferingId) - throws ResourceUnavailableException, ConcurrentOperationException { - - VMInstanceVO vm = _vmDao.findByUuid(vmUuid); + private void orchestrateMigrateForScale(String vmUuid, long srcHostId, DeployDestination dest, Long oldSvcOfferingId) + throws ResourceUnavailableException, ConcurrentOperationException { + + VMInstanceVO vm = _vmDao.findByUuid(vmUuid); 
s_logger.info("Migrating " + vm + " to " + dest); vm.getServiceOfferingId(); @@ -3713,11 +3703,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - - @Override - public VMInstanceVO orchestrateReConfigureVm(String vmUuid, ServiceOffering oldServiceOffering, boolean reconfiguringOnExistingHost) - throws ResourceUnavailableException, - ConcurrentOperationException { + + private VMInstanceVO orchestrateReConfigureVm(String vmUuid, ServiceOffering oldServiceOffering, boolean reconfiguringOnExistingHost) throws ResourceUnavailableException, + ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); long newServiceofferingId = vm.getServiceOfferingId(); @@ -4160,7 +4148,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _vmDao.lockRow(vm.getId(), true); List pendingWorkJobs = _workJobDao.listPendingWorkJobs(VirtualMachine.Type.Instance, - vm.getId(), VmWorkStart.class.getName()); + vm.getId(), VmWorkStart.class.getName()); if (pendingWorkJobs.size() > 0) { assert (pendingWorkJobs.size() == 1); @@ -4175,26 +4163,26 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac workJob.setUserId(callingUser.getId()); workJob.setStep(VmWorkJobVO.Step.Starting); workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); + workJob.setVmInstanceId(vm.getId()); - // save work context info (there are some duplications) - VmWorkStart workInfo = new VmWorkStart(callingUser.getId(), callingAccount.getId(), vm.getId()); - workInfo.setPlan(planToDeploy); - workInfo.setParams(params); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - - // Transaction syntax sugar has a cost here - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = 
(Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + // save work context info (there are some duplications) + VmWorkStart workInfo = new VmWorkStart(callingUser.getId(), callingAccount.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER); + workInfo.setPlan(planToDeploy); + workInfo.setParams(params); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + + // Transaction syntax sugar has a cost here + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmStateSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), VirtualMachine.PowerState.PowerOn, vm.getId(), null); } @@ -4212,40 +4200,40 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _vmDao.lockRow(vm.getId(), true); List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkStop.class.getName()); + VirtualMachine.Type.Instance, vm.getId(), + VmWorkStop.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert (pendingWorkJobs.size() == 1); workJob = pendingWorkJobs.get(0); - } else { - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkStop.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setStep(VmWorkJobVO.Step.Prepare); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkStop workInfo = new VmWorkStop(user.getId(), account.getId(), 
vm.getId(), cleanup); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + } else { + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkStop.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setStep(VmWorkJobVO.Step.Prepare); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkStop workInfo = new VmWorkStop(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, cleanup); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmStateSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), VirtualMachine.PowerState.PowerOff, vm.getId(), null); } @@ -4265,8 +4253,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _vmDao.lockRow(vm.getId(), true); List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkReboot.class.getName()); + VirtualMachine.Type.Instance, vm.getId(), + VmWorkReboot.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && 
pendingWorkJobs.size() > 0) { @@ -4277,28 +4265,28 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkReboot.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setStep(VmWorkJobVO.Step.Prepare); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkReboot workInfo = new VmWorkReboot(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, params); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setStep(VmWorkJobVO.Step.Prepare); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkReboot workInfo = new VmWorkReboot(user.getId(), account.getId(), vm.getId(), params); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } @@ -4313,50 +4301,49 @@ public class VirtualMachineManagerImpl extends 
ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkMigrate.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkMigrate.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkMigrate workInfo = new VmWorkMigrate(user.getId(), account.getId(), vm.getId(), srcHostId, dest); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkMigrate.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkMigrate.class.getName()); + + 
workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkMigrate workInfo = new VmWorkMigrate(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, dest); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmStateSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), - VirtualMachine.PowerState.PowerOn, vm.getId(), vm.getPowerHostId()); + VirtualMachine.PowerState.PowerOn, vm.getId(), vm.getPowerHostId()); } public Outcome migrateVmWithStorageThroughJobQueue( - final String vmUuid, final long srcHostId, final long destHostId, - final Map volumeToPool) { + final String vmUuid, final long srcHostId, final long destHostId, + final Map volumeToPool) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); @@ -4367,50 +4354,49 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkMigrateWithStorage.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new 
VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkMigrate.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkMigrateWithStorage.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkMigrate.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) VmWorkMigrateWithStorage workInfo = new VmWorkMigrateWithStorage(user.getId(), account.getId(), vm.getId(), - srcHostId, destHostId, volumeToPool); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, destHostId, volumeToPool); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + 
context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmStateSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), - VirtualMachine.PowerState.PowerOn, vm.getId(), destHostId); + VirtualMachine.PowerState.PowerOn, vm.getId(), destHostId); } public Outcome migrateVmForScaleThroughJobQueue( - final String vmUuid, final long srcHostId, final DeployDestination dest, final Long newSvcOfferingId) { + final String vmUuid, final long srcHostId, final DeployDestination dest, final Long newSvcOfferingId) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); @@ -4421,49 +4407,48 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkMigrateForScale.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkMigrateForScale.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + 
workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkMigrate.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) VmWorkMigrateForScale workInfo = new VmWorkMigrateForScale(user.getId(), account.getId(), vm.getId(), - srcHostId, dest, newSvcOfferingId); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, dest, newSvcOfferingId); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } public Outcome migrateVmStorageThroughJobQueue( - final String vmUuid, final StoragePool destPool) { + final String vmUuid, final StoragePool destPool) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); @@ -4474,49 +4459,48 @@ public class 
VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkStorageMigration.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkStorageMigration.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkStorageMigration.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) VmWorkStorageMigration workInfo = new VmWorkStorageMigration(user.getId(), account.getId(), vm.getId(), - destPool); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - 
context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, destPool); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } public Outcome addVmToNetworkThroughJobQueue( - final VirtualMachine vm, final Network network, final NicProfile requested) { + final VirtualMachine vm, final Network network, final NicProfile requested) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); @@ -4525,12 +4509,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _vmDao.lockRow(vm.getId(), true); List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkAddVmToNetwork.class.getName()); + VirtualMachine.Type.Instance, vm.getId(), + VmWorkAddVmToNetwork.class.getName()); VmWorkJobVO workJob = null; if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { @@ -4550,7 +4533,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // save work context info (there are some duplications) VmWorkAddVmToNetwork workInfo = new VmWorkAddVmToNetwork(user.getId(), account.getId(), vm.getId(), - network, requested); + 
VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, network, requested); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); @@ -4567,7 +4550,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } public Outcome removeNicFromVmThroughJobQueue( - final VirtualMachine vm, final Nic nic) { + final VirtualMachine vm, final Nic nic) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); @@ -4576,49 +4559,48 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkRemoveNicFromVm.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkRemoveNicFromVm.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkRemoveNicFromVm workInfo = new VmWorkRemoveNicFromVm(user.getId(), account.getId(), vm.getId(), - nic); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - 
AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkRemoveNicFromVm.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkRemoveNicFromVm.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkRemoveNicFromVm workInfo = new VmWorkRemoveNicFromVm(user.getId(), account.getId(), vm.getId(), + VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, nic); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } public Outcome removeVmFromNetworkThroughJobQueue( - final VirtualMachine vm, final Network network, final URI broadcastUri) { + final VirtualMachine vm, final Network network, final URI broadcastUri) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); @@ -4627,44 +4609,43 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void 
doInTransactionWithoutResult(TransactionStatus status) { - - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkRemoveVmFromNetwork.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkRemoveVmFromNetwork.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkRemoveVmFromNetwork workInfo = new VmWorkRemoveVmFromNetwork(user.getId(), account.getId(), vm.getId(), - network, broadcastUri); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkRemoveVmFromNetwork.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkRemoveVmFromNetwork.class.getName()); + + workJob.setAccountId(account.getId()); + 
workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkRemoveVmFromNetwork workInfo = new VmWorkRemoveVmFromNetwork(user.getId(), account.getId(), vm.getId(), + VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, network, broadcastUri); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } @@ -4680,45 +4661,114 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkReconfigure.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkReconfigure.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkReconfigure workInfo = new VmWorkReconfigure(user.getId(), account.getId(), vm.getId(), - oldServiceOffering, 
reconfiguringOnExistingHost); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkReconfigure.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkReconfigure.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkReconfigure workInfo = new VmWorkReconfigure(user.getId(), account.getId(), vm.getId(), + VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, oldServiceOffering, reconfiguringOnExistingHost); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } + + @Override + public Pair handleVmWorkJob(AsyncJob job, VmWork work) throws Exception { + 
VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); + if (vm == null) { + s_logger.info("Unable to find vm " + work.getVmId()); + } + assert (vm != null); + if (work instanceof VmWorkStart) { + VmWorkStart workStart = (VmWorkStart)work; + orchestrateStart(vm.getUuid(), workStart.getParams(), workStart.getPlan()); + return new Pair(JobInfo.Status.SUCCEEDED, null); + } else if (work instanceof VmWorkStop) { + VmWorkStop workStop = (VmWorkStop)work; + orchestrateStop(vm.getUuid(), workStop.isCleanup()); + return new Pair(JobInfo.Status.SUCCEEDED, null); + } else if (work instanceof VmWorkMigrate) { + VmWorkMigrate workMigrate = (VmWorkMigrate)work; + orchestrateMigrate(vm.getUuid(), workMigrate.getSrcHostId(), workMigrate.getDeployDestination()); + return new Pair(JobInfo.Status.SUCCEEDED, null); + } else if (work instanceof VmWorkMigrateWithStorage) { + VmWorkMigrateWithStorage workMigrateWithStorage = (VmWorkMigrateWithStorage)work; + orchestrateMigrateWithStorage(vm.getUuid(), + workMigrateWithStorage.getSrcHostId(), + workMigrateWithStorage.getDestHostId(), + workMigrateWithStorage.getVolumeToPool()); + return new Pair(JobInfo.Status.SUCCEEDED, null); + } else if (work instanceof VmWorkMigrateForScale) { + VmWorkMigrateForScale workMigrateForScale = (VmWorkMigrateForScale)work; + orchestrateMigrateForScale(vm.getUuid(), + workMigrateForScale.getSrcHostId(), + workMigrateForScale.getDeployDestination(), + workMigrateForScale.getNewServiceOfferringId()); + return new Pair(JobInfo.Status.SUCCEEDED, null); + } else if (work instanceof VmWorkReboot) { + VmWorkReboot workReboot = (VmWorkReboot)work; + orchestrateReboot(vm.getUuid(), workReboot.getParams()); + return new Pair(JobInfo.Status.SUCCEEDED, null); + } else if (work instanceof VmWorkAddVmToNetwork) { + VmWorkAddVmToNetwork workAddVmToNetwork = (VmWorkAddVmToNetwork)work; + NicProfile nic = orchestrateAddVmToNetwork(vm, workAddVmToNetwork.getNetwork(), + 
workAddVmToNetwork.getRequestedNicProfile()); + return new Pair(JobInfo.Status.SUCCEEDED, JobSerializerHelper.toObjectSerializedString(nic)); + } else if (work instanceof VmWorkRemoveNicFromVm) { + VmWorkRemoveNicFromVm workRemoveNicFromVm = (VmWorkRemoveNicFromVm)work; + boolean result = orchestrateRemoveNicFromVm(vm, workRemoveNicFromVm.getNic()); + return new Pair(JobInfo.Status.SUCCEEDED, + JobSerializerHelper.toObjectSerializedString(new Boolean(result))); + } else if (work instanceof VmWorkRemoveVmFromNetwork) { + VmWorkRemoveVmFromNetwork workRemoveVmFromNetwork = (VmWorkRemoveVmFromNetwork)work; + boolean result = orchestrateRemoveVmFromNetwork(vm, + workRemoveVmFromNetwork.getNetwork(), workRemoveVmFromNetwork.getBroadcastUri()); + return new Pair(JobInfo.Status.SUCCEEDED, + JobSerializerHelper.toObjectSerializedString(new Boolean(result))); + } else if (work instanceof VmWorkReconfigure) { + VmWorkReconfigure workReconfigure = (VmWorkReconfigure)work; + reConfigureVm(vm.getUuid(), workReconfigure.getNewServiceOffering(), + workReconfigure.isSameHost()); + return new Pair(JobInfo.Status.SUCCEEDED, null); + } else if (work instanceof VmWorkStorageMigration) { + VmWorkStorageMigration workStorageMigration = (VmWorkStorageMigration)work; + orchestrateStorageMigration(vm.getUuid(), workStorageMigration.getDestStoragePool()); + return new Pair(JobInfo.Status.SUCCEEDED, null); + } else { + RuntimeException e = new RuntimeException("Unsupported VM work command: " + job.getCmd()); + String exceptionJson = JobSerializerHelper.toSerializedString(e); + s_logger.error("Serialize exception object into json: " + exceptionJson); + return new Pair(JobInfo.Status.FAILED, exceptionJson); + } + } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkAddVmToNetwork.java b/engine/orchestration/src/com/cloud/vm/VmWorkAddVmToNetwork.java index 97396f12693..2a577f3aefb 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkAddVmToNetwork.java +++ 
b/engine/orchestration/src/com/cloud/vm/VmWorkAddVmToNetwork.java @@ -21,22 +21,22 @@ import com.cloud.network.Network; public class VmWorkAddVmToNetwork extends VmWork { private static final long serialVersionUID = 8861516006586736813L; - Network network; - NicProfile requstedNicProfile; - - public VmWorkAddVmToNetwork(long userId, long accountId, long vmId, - Network network, NicProfile requested) { - super(userId, accountId, vmId); - - this.network = network; - this.requstedNicProfile = requested; - } - - public Network getNetwork() { - return this.network; - } - - public NicProfile getRequestedNicProfile() { - return this.requstedNicProfile; - } + Network network; + NicProfile requstedNicProfile; + + public VmWorkAddVmToNetwork(long userId, long accountId, long vmId, String handlerName, + Network network, NicProfile requested) { + super(userId, accountId, vmId, handlerName); + + this.network = network; + requstedNicProfile = requested; + } + + public Network getNetwork() { + return network; + } + + public NicProfile getRequestedNicProfile() { + return requstedNicProfile; + } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java b/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java index 7534363b921..e29a99c5916 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java @@ -16,9 +16,12 @@ // under the License. 
package com.cloud.vm; +import java.util.Map; + import javax.inject.Inject; import org.apache.log4j.Logger; + import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobDispatcher; @@ -26,6 +29,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.impl.JobSerializerHelper; import org.apache.cloudstack.jobs.JobInfo; +import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.dao.VMInstanceDao; @@ -35,15 +39,25 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch public static final String VM_WORK_QUEUE = "VmWorkJobQueue"; public static final String VM_WORK_JOB_DISPATCHER = "VmWorkJobDispatcher"; public static final String VM_WORK_JOB_WAKEUP_DISPATCHER = "VmWorkJobWakeupDispatcher"; + + @Inject private VirtualMachineManagerImpl _vmMgr; + @Inject private AsyncJobManager _asyncJobMgr; + @Inject private VMInstanceDao _instanceDao; + + private Map _handlers; - @Inject - private VirtualMachineManagerImpl _vmMgr; - @Inject - private AsyncJobManager _asyncJobMgr; - @Inject - private VMInstanceDao _instanceDao; + public VmWorkJobDispatcher() { + } - @Override + public Map getHandlers() { + return _handlers; + } + + public void setHandlers(Map handlers) { + _handlers = handlers; + } + + @Override public void runJob(AsyncJob job) { VmWork work = null; try { @@ -69,80 +83,26 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to deserialize VM work"); return; } + + if (_handlers == null || _handlers.isEmpty()) { + s_logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo()); + _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Invalid startup configuration. 
no job handler is found"); + return; + } + + VmWorkJobHandler handler = _handlers.get(work.getHandlerName()); + + if (handler == null) { + s_logger.error("Unable to find work job handler. handler name: " + work.getHandlerName() + ", job cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo()); + _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to find work job handler"); + return; + } CallContext.register(work.getUserId(), work.getAccountId(), job.getRelated()); - - VMInstanceVO vm = _instanceDao.findById(work.getVmId()); - if (vm == null) { - s_logger.info("Unable to find vm " + work.getVmId()); - } - assert (vm != null); - if (work instanceof VmWorkStart) { - VmWorkStart workStart = (VmWorkStart)work; - _vmMgr.orchestrateStart(vm.getUuid(), workStart.getParams(), workStart.getPlan()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, null); - } else if (work instanceof VmWorkStop) { - VmWorkStop workStop = (VmWorkStop)work; - _vmMgr.orchestrateStop(vm.getUuid(), workStop.isCleanup()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, null); - } else if (work instanceof VmWorkMigrate) { - VmWorkMigrate workMigrate = (VmWorkMigrate)work; - _vmMgr.orchestrateMigrate(vm.getUuid(), workMigrate.getSrcHostId(), workMigrate.getDeployDestination()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, null); - } else if (work instanceof VmWorkMigrateWithStorage) { - VmWorkMigrateWithStorage workMigrateWithStorage = (VmWorkMigrateWithStorage)work; - _vmMgr.orchestrateMigrateWithStorage(vm.getUuid(), - workMigrateWithStorage.getSrcHostId(), - workMigrateWithStorage.getDestHostId(), - workMigrateWithStorage.getVolumeToPool()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, null); - } else if (work instanceof VmWorkMigrateForScale) { - VmWorkMigrateForScale workMigrateForScale = (VmWorkMigrateForScale)work; - 
_vmMgr.orchestrateMigrateForScale(vm.getUuid(), - workMigrateForScale.getSrcHostId(), - workMigrateForScale.getDeployDestination(), - workMigrateForScale.getNewServiceOfferringId()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, null); - } else if (work instanceof VmWorkReboot) { - VmWorkReboot workReboot = (VmWorkReboot)work; - _vmMgr.orchestrateReboot(vm.getUuid(), workReboot.getParams()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, null); - } else if (work instanceof VmWorkAddVmToNetwork) { - VmWorkAddVmToNetwork workAddVmToNetwork = (VmWorkAddVmToNetwork)work; - NicProfile nic = _vmMgr.orchestrateAddVmToNetwork(vm, workAddVmToNetwork.getNetwork(), - workAddVmToNetwork.getRequestedNicProfile()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, - JobSerializerHelper.toObjectSerializedString(nic)); - } else if (work instanceof VmWorkRemoveNicFromVm) { - VmWorkRemoveNicFromVm workRemoveNicFromVm = (VmWorkRemoveNicFromVm)work; - boolean result = _vmMgr.orchestrateRemoveNicFromVm(vm, workRemoveNicFromVm.getNic()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, - JobSerializerHelper.toObjectSerializedString(new Boolean(result))); - } else if (work instanceof VmWorkRemoveVmFromNetwork) { - VmWorkRemoveVmFromNetwork workRemoveVmFromNetwork = (VmWorkRemoveVmFromNetwork)work; - boolean result = _vmMgr.orchestrateRemoveVmFromNetwork(vm, - workRemoveVmFromNetwork.getNetwork(), workRemoveVmFromNetwork.getBroadcastUri()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, - JobSerializerHelper.toObjectSerializedString(new Boolean(result))); - } else if (work instanceof VmWorkReconfigure) { - VmWorkReconfigure workReconfigure = (VmWorkReconfigure)work; - _vmMgr.reConfigureVm(vm.getUuid(), workReconfigure.getNewServiceOffering(), - workReconfigure.isSameHost()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, null); - } 
else if (work instanceof VmWorkStorageMigration) { - VmWorkStorageMigration workStorageMigration = (VmWorkStorageMigration)work; - _vmMgr.orchestrateStorageMigration(vm.getUuid(), workStorageMigration.getDestStoragePool()); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.SUCCEEDED, 0, null); - } else { - assert (false); - s_logger.error("Unhandled VM work command: " + job.getCmd()); - - RuntimeException e = new RuntimeException("Unsupported VM work command: " + job.getCmd()); - String exceptionJson = JobSerializerHelper.toSerializedString(e); - s_logger.error("Serialize exception object into json: " + exceptionJson); - _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, exceptionJson); - } - } catch (Throwable e) { + + Pair result = handler.handleVmWorkJob(job, work); + _asyncJobMgr.completeAsyncJob(job.getId(), result.first(), 0, result.second()); + } catch(Throwable e) { s_logger.error("Unable to complete " + job, e); String exceptionJson = JobSerializerHelper.toSerializedString(e); diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkJobHandler.java b/engine/orchestration/src/com/cloud/vm/VmWorkJobHandler.java new file mode 100644 index 00000000000..6ab1bbc4c77 --- /dev/null +++ b/engine/orchestration/src/com/cloud/vm/VmWorkJobHandler.java @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm; + +import org.apache.cloudstack.framework.jobs.AsyncJob; +import org.apache.cloudstack.jobs.JobInfo; + +import com.cloud.utils.Pair; + +public interface VmWorkJobHandler { + Pair handleVmWorkJob(AsyncJob job, VmWork work) throws Exception; +} diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkMigrate.java b/engine/orchestration/src/com/cloud/vm/VmWorkMigrate.java index 107f5fef3c1..5bcea9a8aca 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkMigrate.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkMigrate.java @@ -38,8 +38,9 @@ public class VmWorkMigrate extends VmWork { private Map storage; long srcHostId; - public VmWorkMigrate(long userId, long accountId, long vmId, long srcHostId, DeployDestination dst) { - super(userId, accountId, vmId); + public VmWorkMigrate(long userId, long accountId, long vmId, String handlerName, + long srcHostId, DeployDestination dst) { + super(userId, accountId, vmId, handlerName); this.srcHostId = srcHostId; zoneId = dst.getDataCenter() != null ? dst.getDataCenter().getId() : null; podId = dst.getPod() != null ? 
dst.getPod().getId() : null; diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkMigrateForScale.java b/engine/orchestration/src/com/cloud/vm/VmWorkMigrateForScale.java index 399ea91c895..e0ad90e1ad5 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkMigrateForScale.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkMigrateForScale.java @@ -19,19 +19,19 @@ package com.cloud.vm; import com.cloud.deploy.DeployDestination; public class VmWorkMigrateForScale extends VmWork { - private static final long serialVersionUID = 6854870395568389613L; + private static final long serialVersionUID = 6854870395568389613L; - long srcHostId; - DeployDestination deployDestination; - Long newSvcOfferingId; - - public VmWorkMigrateForScale(long userId, long accountId, long vmId, long srcHostId, - DeployDestination dest, Long newSvcOfferingId) { - - super(userId, accountId, vmId); - this.srcHostId = srcHostId; - this.deployDestination = dest; - this.newSvcOfferingId = newSvcOfferingId; + long srcHostId; + DeployDestination deployDestination; + Long newSvcOfferingId; + + public VmWorkMigrateForScale(long userId, long accountId, long vmId, String handlerName, long srcHostId, + DeployDestination dest, Long newSvcOfferingId) { + + super(userId, accountId, vmId, handlerName); + this.srcHostId = srcHostId; + deployDestination = dest; + this.newSvcOfferingId = newSvcOfferingId; } public long getSrcHostId() { @@ -39,10 +39,10 @@ public class VmWorkMigrateForScale extends VmWork { } public DeployDestination getDeployDestination() { - return this.deployDestination; + return deployDestination; } public Long getNewServiceOfferringId() { - return this.newSvcOfferingId; + return newSvcOfferingId; } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkMigrateWithStorage.java b/engine/orchestration/src/com/cloud/vm/VmWorkMigrateWithStorage.java index 75024dba49c..384955624c5 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkMigrateWithStorage.java +++ 
b/engine/orchestration/src/com/cloud/vm/VmWorkMigrateWithStorage.java @@ -22,31 +22,31 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; public class VmWorkMigrateWithStorage extends VmWork { - private static final long serialVersionUID = -5626053872453569165L; + private static final long serialVersionUID = -5626053872453569165L; - long srcHostId; - long destHostId; - Map volumeToPool; - - public VmWorkMigrateWithStorage(long userId, long accountId, long vmId, long srcHostId, - long destHostId, Map volumeToPool) { - - super(userId, accountId, vmId); - - this.srcHostId = srcHostId; - this.destHostId = destHostId; - this.volumeToPool = volumeToPool; + long srcHostId; + long destHostId; + Map volumeToPool; + + public VmWorkMigrateWithStorage(long userId, long accountId, long vmId, String handlerName, long srcHostId, + long destHostId, Map volumeToPool) { + + super(userId, accountId, vmId, handlerName); + + this.srcHostId = srcHostId; + this.destHostId = destHostId; + this.volumeToPool = volumeToPool; } public long getSrcHostId() { - return this.srcHostId; + return srcHostId; } public long getDestHostId() { - return this.destHostId; + return destHostId; } public Map getVolumeToPool() { - return this.volumeToPool; + return volumeToPool; } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkReboot.java b/engine/orchestration/src/com/cloud/vm/VmWorkReboot.java index fecaaef1fb1..edecae86f06 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkReboot.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkReboot.java @@ -28,18 +28,10 @@ public class VmWorkReboot extends VmWork { // use serialization friendly map private Map rawParams; - public VmWorkReboot(long userId, long accountId, long vmId, Map params) { - super(userId, accountId, vmId); - - setParams(params); - } - - public Map getRawParams() { - return rawParams; - } - - public void setRawParams(Map params) { - rawParams = params; + public VmWorkReboot(long userId, long accountId, 
long vmId, String handlerName, Map params) { + super(userId, accountId, vmId, handlerName); + + setParams(params); } public Map getParams() { diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkReconfigure.java b/engine/orchestration/src/com/cloud/vm/VmWorkReconfigure.java index 6e2b6d8a2b0..c7fd310f422 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkReconfigure.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkReconfigure.java @@ -19,25 +19,25 @@ package com.cloud.vm; import com.cloud.offering.ServiceOffering; public class VmWorkReconfigure extends VmWork { - private static final long serialVersionUID = -4517030323758086615L; - - ServiceOffering newServiceOffering; - boolean sameHost; - - public VmWorkReconfigure(long userId, long accountId, long vmId, - ServiceOffering newServiceOffering, boolean sameHost) { - - super(userId, accountId, vmId); - - this.newServiceOffering = newServiceOffering; - this.sameHost = sameHost; + private static final long serialVersionUID = -4517030323758086615L; + + ServiceOffering newServiceOffering; + boolean sameHost; + + public VmWorkReconfigure(long userId, long accountId, long vmId, String handlerName, + ServiceOffering newServiceOffering, boolean sameHost) { + + super(userId, accountId, vmId, handlerName); + + this.newServiceOffering = newServiceOffering; + this.sameHost = sameHost; } public ServiceOffering getNewServiceOffering() { - return this.newServiceOffering; + return newServiceOffering; } public boolean isSameHost() { - return this.sameHost; + return sameHost; } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkRemoveNicFromVm.java b/engine/orchestration/src/com/cloud/vm/VmWorkRemoveNicFromVm.java index 8efe77bdda5..4d400b923bf 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkRemoveNicFromVm.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkRemoveNicFromVm.java @@ -19,15 +19,15 @@ package com.cloud.vm; public class VmWorkRemoveNicFromVm extends VmWork { private static final long 
serialVersionUID = -4265657031064437923L; - Nic nic; - - public VmWorkRemoveNicFromVm(long userId, long accountId, long vmId, Nic nic) { - super(userId, accountId, vmId); - - this.nic = nic; + Nic nic; + + public VmWorkRemoveNicFromVm(long userId, long accountId, long vmId, String handlerName, Nic nic) { + super(userId, accountId, vmId, handlerName); + + this.nic = nic; } public Nic getNic() { - return this.nic; + return nic; } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkRemoveVmFromNetwork.java b/engine/orchestration/src/com/cloud/vm/VmWorkRemoveVmFromNetwork.java index 0e94c2f35f9..0cb02b28f60 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkRemoveVmFromNetwork.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkRemoveVmFromNetwork.java @@ -21,23 +21,23 @@ import java.net.URI; import com.cloud.network.Network; public class VmWorkRemoveVmFromNetwork extends VmWork { - private static final long serialVersionUID = -5070392905642149925L; + private static final long serialVersionUID = -5070392905642149925L; - Network network; - URI broadcastUri; - - public VmWorkRemoveVmFromNetwork(long userId, long accountId, long vmId, Network network, URI broadcastUri) { - super(userId, accountId, vmId); - - this.network = network; - this.broadcastUri = broadcastUri; - } - - public Network getNetwork() { - return this.network; - } - - public URI getBroadcastUri() { - return this.broadcastUri; - } + Network network; + URI broadcastUri; + + public VmWorkRemoveVmFromNetwork(long userId, long accountId, long vmId, String handlerName, Network network, URI broadcastUri) { + super(userId, accountId, vmId, handlerName); + + this.network = network; + this.broadcastUri = broadcastUri; + } + + public Network getNetwork() { + return network; + } + + public URI getBroadcastUri() { + return broadcastUri; + } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkStart.java b/engine/orchestration/src/com/cloud/vm/VmWorkStart.java index e023801445d..4f30e5dc807 100644 
--- a/engine/orchestration/src/com/cloud/vm/VmWorkStart.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkStart.java @@ -50,76 +50,76 @@ public class VmWorkStart extends VmWork { // use serialization friendly map private Map rawParams; - public VmWorkStart(long userId, long accountId, long vmId) { - super(userId, accountId, vmId); - } + public VmWorkStart(long userId, long accountId, long vmId, String handlerName) { + super(userId, accountId, vmId, handlerName); + } - public DeploymentPlan getPlan() { + public DeploymentPlan getPlan() { + + if(podId != null || clusterId != null || hostId != null || poolId != null || physicalNetworkId != null) { + // this is ugly, to work with legacy code, we need to re-construct the DeploymentPlan hard-codely + // this has to be refactored together with migrating legacy code into the new way + ReservationContext context = null; + if(reservationId != null) { + Journal journal = new Journal.LogJournal("VmWorkStart", s_logger); + context = new ReservationContextImpl(reservationId, journal, + CallContext.current().getCallingUser(), + CallContext.current().getCallingAccount()); + } + + DeploymentPlan plan = new DataCenterDeployment( + dcId, podId, clusterId, hostId, poolId, physicalNetworkId, + context); + return plan; + } + + return null; + } - if (podId != null || clusterId != null || hostId != null || poolId != null || physicalNetworkId != null) { - // this is ugly, to work with legacy code, we need to re-construct the DeploymentPlan hard-codely - // this has to be refactored together with migrating legacy code into the new way - ReservationContext context = null; - if (reservationId != null) { - Journal journal = new Journal.LogJournal("VmWorkStart", s_logger); - context = new ReservationContextImpl(reservationId, journal, - CallContext.current().getCallingUser(), - CallContext.current().getCallingAccount()); - } + public void setPlan(DeploymentPlan plan) { + if(plan != null) { + dcId = plan.getDataCenterId(); + podId = 
plan.getPodId(); + clusterId = plan.getClusterId(); + hostId = plan.getHostId(); + poolId = plan.getPoolId(); + physicalNetworkId = plan.getPhysicalNetworkId(); + avoids = plan.getAvoids(); + + if(plan.getReservationContext() != null) + reservationId = plan.getReservationContext().getReservationId(); + } + } - DeploymentPlan plan = new DataCenterDeployment( - dcId, podId, clusterId, hostId, poolId, physicalNetworkId, - context); - return plan; - } + public Map getRawParams() { + return rawParams; + } - return null; - } - - public void setPlan(DeploymentPlan plan) { - if (plan != null) { - dcId = plan.getDataCenterId(); - podId = plan.getPodId(); - clusterId = plan.getClusterId(); - hostId = plan.getHostId(); - poolId = plan.getPoolId(); - physicalNetworkId = plan.getPhysicalNetworkId(); - avoids = plan.getAvoids(); - - if (plan.getReservationContext() != null) - reservationId = plan.getReservationContext().getReservationId(); - } - } - - public Map getRawParams() { - return rawParams; - } - - public void setRawParams(Map params) { - rawParams = params; - } - - public Map getParams() { - Map map = new HashMap(); - - if (rawParams != null) { - for (Map.Entry entry : rawParams.entrySet()) { - VirtualMachineProfile.Param key = new VirtualMachineProfile.Param(entry.getKey()); - Object val = JobSerializerHelper.fromObjectSerializedString(entry.getValue()); - map.put(key, val); - } - } - - return map; - } - - public void setParams(Map params) { - if (params != null) { - rawParams = new HashMap(); - for (Map.Entry entry : params.entrySet()) { - rawParams.put(entry.getKey().getName(), JobSerializerHelper.toObjectSerializedString( - entry.getValue() instanceof Serializable ? 
(Serializable)entry.getValue() : entry.getValue().toString())); - } - } - } + public void setRawParams(Map params) { + rawParams = params; + } + + public Map getParams() { + Map map = new HashMap(); + + if(rawParams != null) { + for(Map.Entry entry : rawParams.entrySet()) { + VirtualMachineProfile.Param key = new VirtualMachineProfile.Param(entry.getKey()); + Object val = JobSerializerHelper.fromObjectSerializedString(entry.getValue()); + map.put(key, val); + } + } + + return map; + } + + public void setParams(Map params) { + if(params != null) { + rawParams = new HashMap(); + for(Map.Entry entry : params.entrySet()) { + rawParams.put(entry.getKey().getName(), JobSerializerHelper.toObjectSerializedString( + entry.getValue() instanceof Serializable ? (Serializable)entry.getValue() : entry.getValue().toString())); + } + } + } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkStop.java b/engine/orchestration/src/com/cloud/vm/VmWorkStop.java index f0bc88594e9..ff3bfb9481a 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkStop.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkStop.java @@ -17,12 +17,12 @@ package com.cloud.vm; public class VmWorkStop extends VmWork { - private static final long serialVersionUID = 202908740486785251L; - - private final boolean cleanup; - - public VmWorkStop(long userId, long accountId, long vmId, boolean cleanup) { - super(userId, accountId, vmId); + private static final long serialVersionUID = 202908740486785251L; + + private final boolean cleanup; + + public VmWorkStop(long userId, long accountId, long vmId, String handlerName, boolean cleanup) { + super(userId, accountId, vmId, handlerName); this.cleanup = cleanup; } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkStorageMigration.java b/engine/orchestration/src/com/cloud/vm/VmWorkStorageMigration.java index 76a35b23cf0..bcee88a22f0 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkStorageMigration.java +++ 
b/engine/orchestration/src/com/cloud/vm/VmWorkStorageMigration.java @@ -23,13 +23,13 @@ public class VmWorkStorageMigration extends VmWork { StoragePool destPool; - public VmWorkStorageMigration(long userId, long accountId, long vmId, StoragePool destPool) { - super(userId, accountId, vmId); - - this.destPool = destPool; + public VmWorkStorageMigration(long userId, long accountId, long vmId, String handlerName, StoragePool destPool) { + super(userId, accountId, vmId, handlerName); + + this.destPool = destPool; } public StoragePool getDestStoragePool() { - return this.destPool; + return destPool; } } diff --git a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml index d37303476a8..35683f0b2d2 100644 --- a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml +++ b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -20,11 +20,16 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context" xmlns:aop="http://www.springframework.org/schema/aop" + xmlns:util="http://www.springframework.org/schema/util" + xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.0.xsd - http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-3.0.xsd + http://www.springframework.org/schema/aop + http://www.springframework.org/schema/aop/spring-aop-3.0.xsd http://www.springframework.org/schema/context - http://www.springframework.org/schema/context/spring-context-3.0.xsd" + http://www.springframework.org/schema/context/spring-context-3.0.xsd + http://www.springframework.org/schema/util + http://www.springframework.org/schema/util/spring-util-3.0.xsd" > @@ -230,5 +235,4 @@ class="org.apache.cloudstack.region.gslb.GlobalLoadBalancingRulesServiceImpl" /> - 
From 719271b1d9d1a716b4724d0af10c9915c435a9ce Mon Sep 17 00:00:00 2001 From: Kelven Yang Date: Mon, 16 Dec 2013 15:15:15 -0800 Subject: [PATCH 009/312] Fix tab and trailing spaces --- .../com/cloud/vm/VirtualMachineManager.java | 6 +- .../src/com/cloud/vm/VmWork.java | 8 +- .../cloud/vm/VirtualMachineManagerImpl.java | 640 +++++++++--------- .../com/cloud/vm/VmWorkAddVmToNetwork.java | 32 +- .../src/com/cloud/vm/VmWorkJobDispatcher.java | 15 +- .../com/cloud/vm/VmWorkMigrateForScale.java | 24 +- .../cloud/vm/VmWorkMigrateWithStorage.java | 28 +- .../src/com/cloud/vm/VmWorkReboot.java | 4 +- .../src/com/cloud/vm/VmWorkReconfigure.java | 24 +- .../com/cloud/vm/VmWorkRemoveNicFromVm.java | 10 +- .../cloud/vm/VmWorkRemoveVmFromNetwork.java | 32 +- .../src/com/cloud/vm/VmWorkStart.java | 132 ++-- .../src/com/cloud/vm/VmWorkStop.java | 8 +- .../com/cloud/vm/VmWorkStorageMigration.java | 6 +- 14 files changed, 485 insertions(+), 484 deletions(-) diff --git a/engine/api/src/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/com/cloud/vm/VirtualMachineManager.java index 05cbfc2cb7f..4c2222cf412 100644 --- a/engine/api/src/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/com/cloud/vm/VirtualMachineManager.java @@ -108,9 +108,9 @@ public interface VirtualMachineManager extends Manager { void migrateAway(String vmUuid, long hostId) throws InsufficientServerCapacityException; void migrate(String vmUuid, long srcHostId, DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException; - + void migrateWithStorage(String vmUuid, long srcId, long destId, Map volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException; - + void reboot(String vmUuid, Map params) throws InsufficientCapacityException, ResourceUnavailableException; void advanceReboot(String vmUuid, Map params) throws InsufficientCapacityException, ResourceUnavailableException, @@ -153,7 +153,7 @@ public interface VirtualMachineManager extends Manager { */ 
NicProfile addVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; - + /** * @param vm * @param nic diff --git a/engine/components-api/src/com/cloud/vm/VmWork.java b/engine/components-api/src/com/cloud/vm/VmWork.java index 98e46a3f3a6..ed9f44cb9bb 100644 --- a/engine/components-api/src/com/cloud/vm/VmWork.java +++ b/engine/components-api/src/com/cloud/vm/VmWork.java @@ -32,7 +32,7 @@ public class VmWork implements Serializable { this.accountId = accountId; this.vmId = vmId; this.handlerName = handlerName; - } + } public long getUserId() { return userId; @@ -42,9 +42,9 @@ public class VmWork implements Serializable { return accountId; } - public long getVmId() { - return vmId; - } + public long getVmId() { + return vmId; + } public String getHandlerName() { return handlerName; diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index f89d83c1da6..21b873bd922 100755 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -734,11 +734,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - + private void orchestrateStart(String vmUuid, Map params, DeploymentPlan planToDeploy) throws InsufficientCapacityException, - ConcurrentOperationException, ResourceUnavailableException { - - CallContext cctxt = CallContext.current(); + ConcurrentOperationException, ResourceUnavailableException { + + CallContext cctxt = CallContext.current(); Account account = cctxt.getCallingAccount(); User caller = cctxt.getCallingUser(); @@ -1627,7 +1627,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - + private void orchestrateMigrate(String vmUuid, long srcHostId, DeployDestination dest) throws 
ResourceUnavailableException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null) { @@ -1896,10 +1896,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - + private void orchestrateMigrateWithStorage(String vmUuid, long srcHostId, long destHostId, Map volumeToPool) throws ResourceUnavailableException, - ConcurrentOperationException { - + ConcurrentOperationException { + VMInstanceVO vm = _vmDao.findByUuid(vmUuid); HostVO srcHost = _hostDao.findById(srcHostId); @@ -2177,7 +2177,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - + private void orchestrateReboot(String vmUuid, Map params) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); @@ -3108,7 +3108,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - + private NicProfile orchestrateAddVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { CallContext cctx = CallContext.current(); @@ -3448,9 +3448,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } private void orchestrateMigrateForScale(String vmUuid, long srcHostId, DeployDestination dest, Long oldSvcOfferingId) - throws ResourceUnavailableException, ConcurrentOperationException { - - VMInstanceVO vm = _vmDao.findByUuid(vmUuid); + throws ResourceUnavailableException, ConcurrentOperationException { + + VMInstanceVO vm = _vmDao.findByUuid(vmUuid); s_logger.info("Migrating " + vm + " to " + dest); vm.getServiceOfferingId(); @@ -3703,7 +3703,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } - + private VMInstanceVO orchestrateReConfigureVm(String vmUuid, ServiceOffering oldServiceOffering, boolean 
reconfiguringOnExistingHost) throws ResourceUnavailableException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); @@ -4163,26 +4163,26 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac workJob.setUserId(callingUser.getId()); workJob.setStep(VmWorkJobVO.Step.Starting); workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); + workJob.setVmInstanceId(vm.getId()); - // save work context info (there are some duplications) + // save work context info (there are some duplications) VmWorkStart workInfo = new VmWorkStart(callingUser.getId(), callingAccount.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER); - workInfo.setPlan(planToDeploy); - workInfo.setParams(params); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + workInfo.setPlan(planToDeploy); + workInfo.setParams(params); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + + // Transaction syntax sugar has a cost here + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - - // Transaction syntax sugar has a cost here - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - return new VmStateSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), VirtualMachine.PowerState.PowerOn, vm.getId(), null); } @@ -4207,33 +4207,33 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac 
if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { assert (pendingWorkJobs.size() == 1); workJob = pendingWorkJobs.get(0); - } else { - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkStop.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setStep(VmWorkJobVO.Step.Prepare); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkStop workInfo = new VmWorkStop(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, cleanup); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); + } else { + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkStop.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setStep(VmWorkJobVO.Step.Prepare); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkStop workInfo = new VmWorkStop(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, cleanup); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - final long 
jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - return new VmStateSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), VirtualMachine.PowerState.PowerOff, vm.getId(), null); } @@ -4265,28 +4265,28 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkReboot.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setStep(VmWorkJobVO.Step.Prepare); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkReboot workInfo = new VmWorkReboot(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, params); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setStep(VmWorkJobVO.Step.Prepare); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkReboot workInfo = new VmWorkReboot(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, params); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = 
(Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } @@ -4301,42 +4301,42 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), VmWorkMigrate.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkMigrate.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkMigrate.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) VmWorkMigrate workInfo = new VmWorkMigrate(user.getId(), account.getId(), vm.getId(), 
VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, dest); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmStateSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), VirtualMachine.PowerState.PowerOn, vm.getId(), vm.getPowerHostId()); } @@ -4354,43 +4354,43 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), VmWorkMigrateWithStorage.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkMigrate.class.getName()); - - 
workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkMigrate.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) VmWorkMigrateWithStorage workInfo = new VmWorkMigrateWithStorage(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, destHostId, volumeToPool); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmStateSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), VirtualMachine.PowerState.PowerOn, vm.getId(), destHostId); } @@ -4407,43 +4407,43 @@ public class 
VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), VmWorkMigrateForScale.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkMigrate.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) VmWorkMigrateForScale workInfo = new VmWorkMigrateForScale(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, srcHostId, dest, newSvcOfferingId); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - 
context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } @@ -4459,43 +4459,43 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), VmWorkStorageMigration.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); workJob.setCmd(VmWorkStorageMigration.class.getName()); - - 
workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) VmWorkStorageMigration workInfo = new VmWorkStorageMigration(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, destPool); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } @@ -4559,42 +4559,42 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkRemoveNicFromVm.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && 
pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkRemoveNicFromVm.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkRemoveNicFromVm workInfo = new VmWorkRemoveNicFromVm(user.getId(), account.getId(), vm.getId(), + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkRemoveNicFromVm.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkRemoveNicFromVm.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkRemoveNicFromVm workInfo = new VmWorkRemoveNicFromVm(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, nic); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + 
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } @@ -4609,43 +4609,43 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkRemoveVmFromNetwork.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkRemoveVmFromNetwork.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkRemoveVmFromNetwork workInfo = new VmWorkRemoveVmFromNetwork(user.getId(), account.getId(), vm.getId(), + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkRemoveVmFromNetwork.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + 
workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkRemoveVmFromNetwork.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkRemoveVmFromNetwork workInfo = new VmWorkRemoveVmFromNetwork(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, network, broadcastUri); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } @@ -4661,46 +4661,46 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - _vmDao.lockRow(vm.getId(), true); - - List pendingWorkJobs = _workJobDao.listPendingWorkJobs( - VirtualMachine.Type.Instance, vm.getId(), - VmWorkReconfigure.class.getName()); - - VmWorkJobVO workJob = null; - if (pendingWorkJobs != null && 
pendingWorkJobs.size() > 0) { - assert (pendingWorkJobs.size() == 1); - workJob = pendingWorkJobs.get(0); - } else { - - workJob = new VmWorkJobVO(context.getContextId()); - - workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); - workJob.setCmd(VmWorkReconfigure.class.getName()); - - workJob.setAccountId(account.getId()); - workJob.setUserId(user.getId()); - workJob.setVmType(vm.getType()); - workJob.setVmInstanceId(vm.getId()); - - // save work context info (there are some duplications) - VmWorkReconfigure workInfo = new VmWorkReconfigure(user.getId(), account.getId(), vm.getId(), + _vmDao.lockRow(vm.getId(), true); + + List pendingWorkJobs = _workJobDao.listPendingWorkJobs( + VirtualMachine.Type.Instance, vm.getId(), + VmWorkReconfigure.class.getName()); + + VmWorkJobVO workJob = null; + if (pendingWorkJobs != null && pendingWorkJobs.size() > 0) { + assert (pendingWorkJobs.size() == 1); + workJob = pendingWorkJobs.get(0); + } else { + + workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkJobDispatcher.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkReconfigure.class.getName()); + + workJob.setAccountId(account.getId()); + workJob.setUserId(user.getId()); + workJob.setVmType(vm.getType()); + workJob.setVmInstanceId(vm.getId()); + + // save work context info (there are some duplications) + VmWorkReconfigure workInfo = new VmWorkReconfigure(user.getId(), account.getId(), vm.getId(), VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, oldServiceOffering, reconfiguringOnExistingHost); - workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); - - _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); - } - context.putContextParameter("workJob", workJob); - context.putContextParameter("jobId", new Long(workJob.getId())); - } - }); - - final long jobId = (Long)context.getContextParameter("jobId"); - AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); - + 
workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkJobDispatcher.VM_WORK_QUEUE, vm.getId()); + } + context.putContextParameter("workJob", workJob); + context.putContextParameter("jobId", new Long(workJob.getId())); + } + }); + + final long jobId = (Long)context.getContextParameter("jobId"); + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(jobId); + return new VmJobSyncOutcome((VmWorkJobVO)context.getContextParameter("workJob"), vm.getId()); } - + @Override public Pair handleVmWorkJob(AsyncJob job, VmWork work) throws Exception { diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkAddVmToNetwork.java b/engine/orchestration/src/com/cloud/vm/VmWorkAddVmToNetwork.java index 2a577f3aefb..bb3b49607af 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkAddVmToNetwork.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkAddVmToNetwork.java @@ -21,22 +21,22 @@ import com.cloud.network.Network; public class VmWorkAddVmToNetwork extends VmWork { private static final long serialVersionUID = 8861516006586736813L; - Network network; - NicProfile requstedNicProfile; - + Network network; + NicProfile requstedNicProfile; + public VmWorkAddVmToNetwork(long userId, long accountId, long vmId, String handlerName, - Network network, NicProfile requested) { + Network network, NicProfile requested) { super(userId, accountId, vmId, handlerName); - - this.network = network; - requstedNicProfile = requested; - } - - public Network getNetwork() { - return network; - } - - public NicProfile getRequestedNicProfile() { - return requstedNicProfile; - } + + this.network = network; + requstedNicProfile = requested; + } + + public Network getNetwork() { + return network; + } + + public NicProfile getRequestedNicProfile() { + return requstedNicProfile; + } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java b/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java index 
e29a99c5916..7ed9f39411f 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkJobDispatcher.java @@ -39,11 +39,12 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch public static final String VM_WORK_QUEUE = "VmWorkJobQueue"; public static final String VM_WORK_JOB_DISPATCHER = "VmWorkJobDispatcher"; public static final String VM_WORK_JOB_WAKEUP_DISPATCHER = "VmWorkJobWakeupDispatcher"; - + @Inject private VirtualMachineManagerImpl _vmMgr; - @Inject private AsyncJobManager _asyncJobMgr; + @Inject + private AsyncJobManager _asyncJobMgr; @Inject private VMInstanceDao _instanceDao; - + private Map _handlers; public VmWorkJobDispatcher() { @@ -57,7 +58,7 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch _handlers = handlers; } - @Override + @Override public void runJob(AsyncJob job) { VmWork work = null; try { @@ -83,7 +84,7 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to deserialize VM work"); return; } - + if (_handlers == null || _handlers.isEmpty()) { s_logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo()); _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Invalid startup configuration. no job handler is found"); @@ -91,7 +92,7 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch } VmWorkJobHandler handler = _handlers.get(work.getHandlerName()); - + if (handler == null) { s_logger.error("Unable to find work job handler. 
handler name: " + work.getHandlerName() + ", job cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo()); _asyncJobMgr.completeAsyncJob(job.getId(), JobInfo.Status.FAILED, 0, "Unable to find work job handler"); @@ -99,7 +100,7 @@ public class VmWorkJobDispatcher extends AdapterBase implements AsyncJobDispatch } CallContext.register(work.getUserId(), work.getAccountId(), job.getRelated()); - + Pair result = handler.handleVmWorkJob(job, work); _asyncJobMgr.completeAsyncJob(job.getId(), result.first(), 0, result.second()); } catch(Throwable e) { diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkMigrateForScale.java b/engine/orchestration/src/com/cloud/vm/VmWorkMigrateForScale.java index e0ad90e1ad5..0dd4d88c218 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkMigrateForScale.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkMigrateForScale.java @@ -19,19 +19,19 @@ package com.cloud.vm; import com.cloud.deploy.DeployDestination; public class VmWorkMigrateForScale extends VmWork { - private static final long serialVersionUID = 6854870395568389613L; + private static final long serialVersionUID = 6854870395568389613L; + + long srcHostId; + DeployDestination deployDestination; + Long newSvcOfferingId; - long srcHostId; - DeployDestination deployDestination; - Long newSvcOfferingId; - public VmWorkMigrateForScale(long userId, long accountId, long vmId, String handlerName, long srcHostId, - DeployDestination dest, Long newSvcOfferingId) { - + DeployDestination dest, Long newSvcOfferingId) { + super(userId, accountId, vmId, handlerName); - this.srcHostId = srcHostId; - deployDestination = dest; - this.newSvcOfferingId = newSvcOfferingId; + this.srcHostId = srcHostId; + deployDestination = dest; + this.newSvcOfferingId = newSvcOfferingId; } public long getSrcHostId() { @@ -39,10 +39,10 @@ public class VmWorkMigrateForScale extends VmWork { } public DeployDestination getDeployDestination() { - return deployDestination; + return deployDestination; } public 
Long getNewServiceOfferringId() { - return newSvcOfferingId; + return newSvcOfferingId; } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkMigrateWithStorage.java b/engine/orchestration/src/com/cloud/vm/VmWorkMigrateWithStorage.java index 384955624c5..ee30c74abfb 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkMigrateWithStorage.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkMigrateWithStorage.java @@ -22,31 +22,31 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; public class VmWorkMigrateWithStorage extends VmWork { - private static final long serialVersionUID = -5626053872453569165L; + private static final long serialVersionUID = -5626053872453569165L; + + long srcHostId; + long destHostId; + Map volumeToPool; - long srcHostId; - long destHostId; - Map volumeToPool; - public VmWorkMigrateWithStorage(long userId, long accountId, long vmId, String handlerName, long srcHostId, - long destHostId, Map volumeToPool) { - + long destHostId, Map volumeToPool) { + super(userId, accountId, vmId, handlerName); - - this.srcHostId = srcHostId; - this.destHostId = destHostId; - this.volumeToPool = volumeToPool; + + this.srcHostId = srcHostId; + this.destHostId = destHostId; + this.volumeToPool = volumeToPool; } public long getSrcHostId() { - return srcHostId; + return srcHostId; } public long getDestHostId() { - return destHostId; + return destHostId; } public Map getVolumeToPool() { - return volumeToPool; + return volumeToPool; } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkReboot.java b/engine/orchestration/src/com/cloud/vm/VmWorkReboot.java index edecae86f06..63c7d004968 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkReboot.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkReboot.java @@ -30,8 +30,8 @@ public class VmWorkReboot extends VmWork { public VmWorkReboot(long userId, long accountId, long vmId, String handlerName, Map params) { super(userId, accountId, vmId, handlerName); - - 
setParams(params); + + setParams(params); } public Map getParams() { diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkReconfigure.java b/engine/orchestration/src/com/cloud/vm/VmWorkReconfigure.java index c7fd310f422..4c564d5dc38 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkReconfigure.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkReconfigure.java @@ -19,25 +19,25 @@ package com.cloud.vm; import com.cloud.offering.ServiceOffering; public class VmWorkReconfigure extends VmWork { - private static final long serialVersionUID = -4517030323758086615L; - - ServiceOffering newServiceOffering; - boolean sameHost; - + private static final long serialVersionUID = -4517030323758086615L; + + ServiceOffering newServiceOffering; + boolean sameHost; + public VmWorkReconfigure(long userId, long accountId, long vmId, String handlerName, - ServiceOffering newServiceOffering, boolean sameHost) { - + ServiceOffering newServiceOffering, boolean sameHost) { + super(userId, accountId, vmId, handlerName); - - this.newServiceOffering = newServiceOffering; - this.sameHost = sameHost; + + this.newServiceOffering = newServiceOffering; + this.sameHost = sameHost; } public ServiceOffering getNewServiceOffering() { - return newServiceOffering; + return newServiceOffering; } public boolean isSameHost() { - return sameHost; + return sameHost; } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkRemoveNicFromVm.java b/engine/orchestration/src/com/cloud/vm/VmWorkRemoveNicFromVm.java index 4d400b923bf..29264e6f64c 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkRemoveNicFromVm.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkRemoveNicFromVm.java @@ -19,15 +19,15 @@ package com.cloud.vm; public class VmWorkRemoveNicFromVm extends VmWork { private static final long serialVersionUID = -4265657031064437923L; - Nic nic; - + Nic nic; + public VmWorkRemoveNicFromVm(long userId, long accountId, long vmId, String handlerName, Nic nic) { super(userId, 
accountId, vmId, handlerName); - - this.nic = nic; + + this.nic = nic; } public Nic getNic() { - return nic; + return nic; } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkRemoveVmFromNetwork.java b/engine/orchestration/src/com/cloud/vm/VmWorkRemoveVmFromNetwork.java index 0cb02b28f60..535b8d00faa 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkRemoveVmFromNetwork.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkRemoveVmFromNetwork.java @@ -21,23 +21,23 @@ import java.net.URI; import com.cloud.network.Network; public class VmWorkRemoveVmFromNetwork extends VmWork { - private static final long serialVersionUID = -5070392905642149925L; + private static final long serialVersionUID = -5070392905642149925L; + + Network network; + URI broadcastUri; - Network network; - URI broadcastUri; - public VmWorkRemoveVmFromNetwork(long userId, long accountId, long vmId, String handlerName, Network network, URI broadcastUri) { super(userId, accountId, vmId, handlerName); - - this.network = network; - this.broadcastUri = broadcastUri; - } - - public Network getNetwork() { - return network; - } - - public URI getBroadcastUri() { - return broadcastUri; - } + + this.network = network; + this.broadcastUri = broadcastUri; + } + + public Network getNetwork() { + return network; + } + + public URI getBroadcastUri() { + return broadcastUri; + } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkStart.java b/engine/orchestration/src/com/cloud/vm/VmWorkStart.java index 4f30e5dc807..f1b2efdbeac 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkStart.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkStart.java @@ -52,74 +52,74 @@ public class VmWorkStart extends VmWork { public VmWorkStart(long userId, long accountId, long vmId, String handlerName) { super(userId, accountId, vmId, handlerName); - } + } - public DeploymentPlan getPlan() { - - if(podId != null || clusterId != null || hostId != null || poolId != null || physicalNetworkId != null) { - 
// this is ugly, to work with legacy code, we need to re-construct the DeploymentPlan hard-codely - // this has to be refactored together with migrating legacy code into the new way - ReservationContext context = null; - if(reservationId != null) { - Journal journal = new Journal.LogJournal("VmWorkStart", s_logger); - context = new ReservationContextImpl(reservationId, journal, - CallContext.current().getCallingUser(), - CallContext.current().getCallingAccount()); - } - - DeploymentPlan plan = new DataCenterDeployment( - dcId, podId, clusterId, hostId, poolId, physicalNetworkId, - context); - return plan; - } - - return null; - } + public DeploymentPlan getPlan() { - public void setPlan(DeploymentPlan plan) { - if(plan != null) { - dcId = plan.getDataCenterId(); - podId = plan.getPodId(); - clusterId = plan.getClusterId(); - hostId = plan.getHostId(); - poolId = plan.getPoolId(); - physicalNetworkId = plan.getPhysicalNetworkId(); - avoids = plan.getAvoids(); - - if(plan.getReservationContext() != null) - reservationId = plan.getReservationContext().getReservationId(); - } - } + if (podId != null || clusterId != null || hostId != null || poolId != null || physicalNetworkId != null) { + // this is ugly, to work with legacy code, we need to re-construct the DeploymentPlan hard-codely + // this has to be refactored together with migrating legacy code into the new way + ReservationContext context = null; + if (reservationId != null) { + Journal journal = new Journal.LogJournal("VmWorkStart", s_logger); + context = new ReservationContextImpl(reservationId, journal, + CallContext.current().getCallingUser(), + CallContext.current().getCallingAccount()); + } - public Map getRawParams() { - return rawParams; - } + DeploymentPlan plan = new DataCenterDeployment( + dcId, podId, clusterId, hostId, poolId, physicalNetworkId, + context); + return plan; + } - public void setRawParams(Map params) { - rawParams = params; - } - - public Map getParams() { - Map map = new HashMap(); - 
- if(rawParams != null) { - for(Map.Entry entry : rawParams.entrySet()) { - VirtualMachineProfile.Param key = new VirtualMachineProfile.Param(entry.getKey()); - Object val = JobSerializerHelper.fromObjectSerializedString(entry.getValue()); - map.put(key, val); - } - } - - return map; - } - - public void setParams(Map params) { - if(params != null) { - rawParams = new HashMap(); - for(Map.Entry entry : params.entrySet()) { - rawParams.put(entry.getKey().getName(), JobSerializerHelper.toObjectSerializedString( - entry.getValue() instanceof Serializable ? (Serializable)entry.getValue() : entry.getValue().toString())); - } - } - } + return null; + } + + public void setPlan(DeploymentPlan plan) { + if (plan != null) { + dcId = plan.getDataCenterId(); + podId = plan.getPodId(); + clusterId = plan.getClusterId(); + hostId = plan.getHostId(); + poolId = plan.getPoolId(); + physicalNetworkId = plan.getPhysicalNetworkId(); + avoids = plan.getAvoids(); + + if (plan.getReservationContext() != null) + reservationId = plan.getReservationContext().getReservationId(); + } + } + + public Map getRawParams() { + return rawParams; + } + + public void setRawParams(Map params) { + rawParams = params; + } + + public Map getParams() { + Map map = new HashMap(); + + if (rawParams != null) { + for (Map.Entry entry : rawParams.entrySet()) { + VirtualMachineProfile.Param key = new VirtualMachineProfile.Param(entry.getKey()); + Object val = JobSerializerHelper.fromObjectSerializedString(entry.getValue()); + map.put(key, val); + } + } + + return map; + } + + public void setParams(Map params) { + if (params != null) { + rawParams = new HashMap(); + for (Map.Entry entry : params.entrySet()) { + rawParams.put(entry.getKey().getName(), JobSerializerHelper.toObjectSerializedString( + entry.getValue() instanceof Serializable ? 
(Serializable)entry.getValue() : entry.getValue().toString())); + } + } + } } diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkStop.java b/engine/orchestration/src/com/cloud/vm/VmWorkStop.java index ff3bfb9481a..6d4148000c8 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkStop.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkStop.java @@ -17,10 +17,10 @@ package com.cloud.vm; public class VmWorkStop extends VmWork { - private static final long serialVersionUID = 202908740486785251L; - - private final boolean cleanup; - + private static final long serialVersionUID = 202908740486785251L; + + private final boolean cleanup; + public VmWorkStop(long userId, long accountId, long vmId, String handlerName, boolean cleanup) { super(userId, accountId, vmId, handlerName); this.cleanup = cleanup; diff --git a/engine/orchestration/src/com/cloud/vm/VmWorkStorageMigration.java b/engine/orchestration/src/com/cloud/vm/VmWorkStorageMigration.java index bcee88a22f0..2b2f8e841ed 100644 --- a/engine/orchestration/src/com/cloud/vm/VmWorkStorageMigration.java +++ b/engine/orchestration/src/com/cloud/vm/VmWorkStorageMigration.java @@ -25,11 +25,11 @@ public class VmWorkStorageMigration extends VmWork { public VmWorkStorageMigration(long userId, long accountId, long vmId, String handlerName, StoragePool destPool) { super(userId, accountId, vmId, handlerName); - - this.destPool = destPool; + + this.destPool = destPool; } public StoragePool getDestStoragePool() { - return destPool; + return destPool; } } From 5e552ec66bcc2af13be4de5ba2ffbfc6bb11f32d Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Mon, 16 Dec 2013 16:12:43 -0800 Subject: [PATCH 010/312] Reduce width of multiselect column to prevent overflow --- ui/css/cloudstack3.css | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index b06b79a795a..b56cf7fad25 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -176,14 
+176,18 @@ table tbody td.truncated > span { /** Multiselect*/ table thead th.multiselect, table tbody td.multiselect { - width: 40px; - min-width: 40px; - max-width: 40px; + width: 20px; + min-width: 20px; + max-width: 20px; } table thead th.multiselect input, table tbody td.multiselect input { margin: 0; + /*+placement:shift -8px 0px;*/ + position: relative; + left: -8px; + top: 0px; } table thead th.multiselect input { From caba41747dac3071c61c5e3b3def5d845ae7eea3 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Mon, 16 Dec 2013 16:16:29 -0800 Subject: [PATCH 011/312] Truncate event type column to prevent wrapping --- ui/scripts/events.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ui/scripts/events.js b/ui/scripts/events.js index fdc496e2c45..2731cb65369 100644 --- a/ui/scripts/events.js +++ b/ui/scripts/events.js @@ -43,7 +43,8 @@ label: 'label.level' }, type: { - label: 'Type' + label: 'Type', + truncate: true }, domain: { label: 'label.domain' From 2a50fd0b0b0bdbf457b29cef46f3e7d2380d9803 Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Mon, 16 Dec 2013 16:21:59 -0800 Subject: [PATCH 012/312] Fix wrapping for port forwarding multiedit fields in FF --- ui/css/cloudstack3.css | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index b56cf7fad25..4fb9098ac26 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -7619,10 +7619,10 @@ div.container div.panel div#details-tab-addloadBalancer.detail-group div.loadBal } .multi-edit .range input { - width: 41px; - margin-left: 13px; + width: 35px; + margin-left: 6px; + margin-right: 2px; position: relative; - float: left; } .multi-edit .range label { From f919441c347f51d618f70c8044b6c00a7a8f72f2 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Mon, 16 Dec 2013 16:29:59 -0800 Subject: [PATCH 013/312] CLOUDSTACK-5252: UI > Infrastructure > Virtual Routers > group by zone/pod/cluster > include project-related 
routers into calculation. --- ui/scripts/system.js | 497 ++++++++++++------------------------------- 1 file changed, 137 insertions(+), 360 deletions(-) diff --git a/ui/scripts/system.js b/ui/scripts/system.js index f6a50a46abe..6fdb51f3623 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -8797,66 +8797,12 @@ dataType: "json", async: true, success: function(json) { - var zoneObjs = json.listzonesresponse.zone; - if (zoneObjs != null) { - for (var i = 0; i < zoneObjs.length; i++) { - var currentPage = 1; - $.ajax({ - url: createURL('listRouters'), - data: { - zoneid: zoneObjs[i].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - async: false, - success: function(json) { - if (json.listroutersresponse.count != undefined) { - zoneObjs[i].routerCount = json.listroutersresponse.count; - var routerCountFromAllPages = json.listroutersresponse.count; - var routerCountFromFirstPageToCurrentPage = json.listroutersresponse.router.length; - var routerRequiresUpgrade = 0; - var callListApiWithPage = function() { - $.ajax({ - url: createURL('listRouters'), - async: false, - data: { - zoneid: zoneObjs[i].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - success: function(json) { - routerCountFromFirstPageToCurrentPage += json.listroutersresponse.router.length; - var items = json.listroutersresponse.router; - for (var i = 0; i < items.length; i++) { - if (items[i].requiresupgrade) { - routerRequiresUpgrade++; - } - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - } - }); - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - zoneObjs[i].routerRequiresUpgrade = routerRequiresUpgrade; - - } else { - zoneObjs[i].routerCount = 0; - zoneObjs[i].routerRequiresUpgrade = 0; - } - } - }); - } + var groupbyObjs = json.listzonesresponse.zone; + if (groupbyObjs 
!= null) { + addExtraPropertiesToGroupbyObjects(groupbyObjs, 'zoneid'); } args.response.success({ - data: zoneObjs + data: groupbyObjs }); } }); @@ -8919,67 +8865,12 @@ } } }], - dataProvider: function(args) { - var currentPage = 1; - $.ajax({ - url: createURL('listRouters'), - data: { - zoneid: args.context.routerGroupByZone[0].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - async: false, - success: function(json) { - if (json.listroutersresponse.count != undefined) { - args.context.routerGroupByZone[0].routerCount = json.listroutersresponse.count; - var routerCountFromAllPages = json.listroutersresponse.count; - var routerCountFromFirstPageToCurrentPage = json.listroutersresponse.router.length; - var routerRequiresUpgrade = 0; - var callListApiWithPage = function() { - $.ajax({ - url: createURL('listRouters'), - async: false, - data: { - zoneid: args.context.routerGroupByZone[0].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - success: function(json) { - routerCountFromFirstPageToCurrentPage += json.listroutersresponse.router.length; - var items = json.listroutersresponse.router; - for (var i = 0; i < items.length; i++) { - if (items[i].requiresupgrade) { - routerRequiresUpgrade++; - } - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - } - }); - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - args.context.routerGroupByZone[0].routerRequiresUpgrade = routerRequiresUpgrade; - - } else { - args.context.routerGroupByZone[0].routerCount = 0; - args.context.routerGroupByZone[0].routerRequiresUpgrade = 0; - } - } - }); - - setTimeout(function() { - args.response.success({ - data: args.context.routerGroupByZone[0], - actionFilter: routerGroupActionfilter - }) - }); + dataProvider: function(args) { + 
addExtraPropertiesToGroupbyObject(args.context.routerGroupByZone[0], 'zoneid'); + args.response.success({ + data: args.context.routerGroupByZone[0], + actionFilter: routerGroupActionfilter + }); } } } @@ -9029,67 +8920,13 @@ dataType: "json", async: true, success: function (json) { - var podObjs = json.listpodsresponse.pod; - if (podObjs != null) { - for (var i = 0; i < podObjs.length; i++) { - var currentPage = 1; - $.ajax({ - url: createURL('listRouters'), - data: { - podid: podObjs[i].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - async: false, - success: function (json) { - if (json.listroutersresponse.count != undefined) { - podObjs[i].routerCount = json.listroutersresponse.count; - var routerCountFromAllPages = json.listroutersresponse.count; - var routerCountFromFirstPageToCurrentPage = json.listroutersresponse.router.length; - var routerRequiresUpgrade = 0; - var callListApiWithPage = function () { - $.ajax({ - url: createURL('listRouters'), - async: false, - data: { - podid: podObjs[i].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - success: function (json) { - routerCountFromFirstPageToCurrentPage += json.listroutersresponse.router.length; - var items = json.listroutersresponse.router; - for (var i = 0; i < items.length; i++) { - if (items[i].requiresupgrade) { - routerRequiresUpgrade++; - } - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - } - }); - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - podObjs[i].routerRequiresUpgrade = routerRequiresUpgrade; - - } else { - podObjs[i].routerCount = 0; - podObjs[i].routerRequiresUpgrade = 0; - } - } - }); - } - } - args.response.success({ - data: podObjs - }); + var groupbyObjs = json.listpodsresponse.pod; + if (groupbyObjs != null) { + addExtraPropertiesToGroupbyObjects(groupbyObjs, 
'podid'); + } + args.response.success({ + data: groupbyObjs + }); } }); }, @@ -9155,65 +8992,11 @@ } }], dataProvider: function (args) { - var currentPage = 1; - $.ajax({ - url: createURL('listRouters'), - data: { - podid: args.context.routerGroupByPod[0].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - async: false, - success: function (json) { - if (json.listroutersresponse.count != undefined) { - args.context.routerGroupByPod[0].routerCount = json.listroutersresponse.count; - var routerCountFromAllPages = json.listroutersresponse.count; - var routerCountFromFirstPageToCurrentPage = json.listroutersresponse.router.length; - var routerRequiresUpgrade = 0; - var callListApiWithPage = function () { - $.ajax({ - url: createURL('listRouters'), - async: false, - data: { - podid: args.context.routerGroupByPod[0].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - success: function (json) { - routerCountFromFirstPageToCurrentPage += json.listroutersresponse.router.length; - var items = json.listroutersresponse.router; - for (var i = 0; i < items.length; i++) { - if (items[i].requiresupgrade) { - routerRequiresUpgrade++; - } - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - } - }); - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - args.context.routerGroupByPod[0].routerRequiresUpgrade = routerRequiresUpgrade; - - } else { - args.context.routerGroupByPod[0].routerCount = 0; - args.context.routerGroupByPod[0].routerRequiresUpgrade = 0; - } - } - }); - setTimeout(function() { - args.response.success({ - data: args.context.routerGroupByPod[0], - actionFilter: routerGroupActionfilter - }); - }); + addExtraPropertiesToGroupbyObject(args.context.routerGroupByPod[0], 'podid'); + args.response.success({ + data: args.context.routerGroupByPod[0], + actionFilter: 
routerGroupActionfilter + }); } } } @@ -9263,67 +9046,13 @@ dataType: "json", async: true, success: function (json) { - var clusterObjs = json.listclustersresponse.cluster; - if (clusterObjs != null) { - for (var i = 0; i < clusterObjs.length; i++) { - var currentPage = 1; - $.ajax({ - url: createURL('listRouters'), - data: { - clusterid: clusterObjs[i].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - async: false, - success: function (json) { - if (json.listroutersresponse.count != undefined) { - clusterObjs[i].routerCount = json.listroutersresponse.count; - var routerCountFromAllPages = json.listroutersresponse.count; - var routerCountFromFirstPageToCurrentPage = json.listroutersresponse.router.length; - var routerRequiresUpgrade = 0; - var callListApiWithPage = function () { - $.ajax({ - url: createURL('listRouters'), - async: false, - data: { - clusterid: clusterObjs[i].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - success: function (json) { - routerCountFromFirstPageToCurrentPage += json.listroutersresponse.router.length; - var items = json.listroutersresponse.router; - for (var i = 0; i < items.length; i++) { - if (items[i].requiresupgrade) { - routerRequiresUpgrade++; - } - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - } - }); - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - clusterObjs[i].routerRequiresUpgrade = routerRequiresUpgrade; - - } else { - clusterObjs[i].routerCount = 0; - clusterObjs[i].routerRequiresUpgrade = 0; - } - } - }); - } - } - args.response.success({ - data: clusterObjs - }); + var groupbyObjs = json.listclustersresponse.cluster; + if (groupbyObjs != null) { + addExtraPropertiesToGroupbyObjects(groupbyObjs, 'clusterid'); + } + args.response.success({ + data: groupbyObjs + }); } }); }, @@ -9391,66 +9120,12 
@@ label: 'zone' } }], - dataProvider: function (args) { - var currentPage = 1; - $.ajax({ - url: createURL('listRouters'), - data: { - clusterid: args.context.routerGroupByCluster[0].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - async: false, - success: function (json) { - if (json.listroutersresponse.count != undefined) { - args.context.routerGroupByCluster[0].routerCount = json.listroutersresponse.count; - var routerCountFromAllPages = json.listroutersresponse.count; - var routerCountFromFirstPageToCurrentPage = json.listroutersresponse.router.length; - var routerRequiresUpgrade = 0; - var callListApiWithPage = function () { - $.ajax({ - url: createURL('listRouters'), - async: false, - data: { - clusterid: args.context.routerGroupByCluster[0].id, - listAll: true, - page: currentPage, - pagesize: pageSize //global variable - }, - success: function (json) { - routerCountFromFirstPageToCurrentPage += json.listroutersresponse.router.length; - var items = json.listroutersresponse.router; - for (var i = 0; i < items.length; i++) { - if (items[i].requiresupgrade) { - routerRequiresUpgrade++; - } - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - } - }); - } - if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { - currentPage++; - callListApiWithPage(); - } - args.context.routerGroupByCluster[0].routerRequiresUpgrade = routerRequiresUpgrade; - - } else { - args.context.routerGroupByCluster[0].routerCount = 0; - args.context.routerGroupByCluster[0].routerRequiresUpgrade = 0; - } - } - }); - setTimeout(function() { - args.response.success({ - data: args.context.routerGroupByCluster[0], - actionFilter: routerGroupActionfilter - }); - }); + dataProvider: function (args) { + addExtraPropertiesToGroupbyObject(args.context.routerGroupByCluster[0], 'clusterid'); + args.response.success({ + data: args.context.routerGroupByCluster[0], + 
actionFilter: routerGroupActionfilter + }); } } } @@ -18912,4 +18587,106 @@ return []; }; + + function addExtraPropertiesToGroupbyObjects(groupbyObjs, groupbyId) { + for (var i = 0; i < groupbyObjs.length; i++) { + addExtraPropertiesToGroupbyObject(groupbyObjs[i], groupbyId); + } + } + + function addExtraPropertiesToGroupbyObject(groupbyObj, groupbyId) { + var currentPage = 1; + + var listRoutersData = { + listAll: true, + pagesize: pageSize //global variable + }; + listRoutersData[groupbyId] = groupbyObj.id; + + $.ajax({ + url: createURL('listRouters'), + data: $.extend({}, listRoutersData, { + page: currentPage + }), + async: false, + success: function(json) { + if (json.listroutersresponse.count != undefined) { + var routerCountFromAllPages = json.listroutersresponse.count; + var routerCountFromFirstPageToCurrentPage = json.listroutersresponse.router.length; + var routerRequiresUpgrade = 0; + + $.ajax({ + url: createURL('listRouters'), + data: $.extend({}, listRoutersData, { + page: currentPage, + projectid: -1 + }), + async: false, + success: function(json) { + if (json.listroutersresponse.count != undefined) { + routerCountFromAllPages += json.listroutersresponse.count; + groupbyObj.routerCount = routerCountFromAllPages; + + routerCountFromFirstPageToCurrentPage += json.listroutersresponse.router.length; + } + } + }); + + var callListApiWithPage = function() { + $.ajax({ + url: createURL('listRouters'), + async: false, + data: $.extend({}, listRoutersData, { + page: currentPage + }), + success: function(json) { + routerCountFromFirstPageToCurrentPage += json.listroutersresponse.router.length; + var items = json.listroutersresponse.router; + for (var k = 0; k < items.length; k++) { + if (items[k].requiresupgrade) { + routerRequiresUpgrade++; + } + } + + $.ajax({ + url: createURL('listRouters'), + async: false, + data: $.extend({}, listRoutersData, { + page: currentPage, + projectid: -1 + }), + success: function(json) { + routerCountFromFirstPageToCurrentPage += 
json.listroutersresponse.router.length; + var items = json.listroutersresponse.router; + for (var k = 0; k < items.length; k++) { + if (items[k].requiresupgrade) { + routerRequiresUpgrade++; + } + } + } + }); + + if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { + currentPage++; + callListApiWithPage(); + } + } + }); + } + + if (routerCountFromFirstPageToCurrentPage < routerCountFromAllPages) { + currentPage++; + callListApiWithPage(); + } + + groupbyObj.routerRequiresUpgrade = routerRequiresUpgrade; + + } else { + groupbyObj.routerCount = 0; + groupbyObj.routerRequiresUpgrade = 0; + } + } + }); + } + })($, cloudStack); From 5594ea990f5cf9086d4ef410eca69bd69dec2b0f Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Mon, 16 Dec 2013 20:08:32 +0530 Subject: [PATCH 014/312] CLOUDSTACK-5519. [VMWARE] Cancel vCenter tasks if the task invoked by CloudStack failes with timeout error Conflicts: vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java --- .../hypervisor/vmware/util/VmwareClient.java | 31 ++++++++++++------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java index 0a44a72289a..d7aaabcae1c 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java @@ -28,6 +28,9 @@ import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLSession; import javax.xml.ws.BindingProvider; import javax.xml.ws.handler.MessageContext; +import javax.xml.ws.WebServiceException; + +import org.apache.log4j.Logger; import com.vmware.vim25.DynamicProperty; import com.vmware.vim25.InvalidCollectorVersionFaultMsg; @@ -59,6 +62,7 @@ import com.vmware.vim25.VimService; * */ public class VmwareClient { + private static final Logger s_logger = Logger.getLogger(VmwareClient.class); private static class 
TrustAllTrustManager implements javax.net.ssl.TrustManager, javax.net.ssl.X509TrustManager { @@ -313,20 +317,25 @@ public class VmwareClient { * @throws RuntimeFaultFaultMsg * @throws InvalidPropertyFaultMsg */ - public boolean waitForTask(ManagedObjectReference task) throws InvalidPropertyFaultMsg, RuntimeFaultFaultMsg, InvalidCollectorVersionFaultMsg { + public boolean waitForTask(ManagedObjectReference task) throws InvalidPropertyFaultMsg, RuntimeFaultFaultMsg, InvalidCollectorVersionFaultMsg, Exception { boolean retVal = false; - // info has a property - state for state of the task - Object[] result = - waitForValues(task, new String[] {"info.state", "info.error"}, new String[] {"state"}, new Object[][] {new Object[] {TaskInfoState.SUCCESS, - TaskInfoState.ERROR}}); - - if (result[0].equals(TaskInfoState.SUCCESS)) { - retVal = true; - } - if (result[1] instanceof LocalizedMethodFault) { - throw new RuntimeException(((LocalizedMethodFault)result[1]).getLocalizedMessage()); + try { + // info has a property - state for state of the task + Object[] result = waitForValues(task, new String[] { "info.state", "info.error" }, new String[] { "state" }, new Object[][] { new Object[] { + TaskInfoState.SUCCESS, TaskInfoState.ERROR } }); + + if (result[0].equals(TaskInfoState.SUCCESS)) { + retVal = true; + } + if (result[1] instanceof LocalizedMethodFault) { + throw new RuntimeException(((LocalizedMethodFault) result[1]).getLocalizedMessage()); + } + } catch(WebServiceException we) { + s_logger.debug("Cancelling vCenter task because task failed with " + we.getLocalizedMessage()); + getService().cancelTask(task); + throw new RuntimeException("vCenter task failed due to " + we.getLocalizedMessage()); } return retVal; } From 759d484d9a707fb9cc41eda37bfbad72becd1d92 Mon Sep 17 00:00:00 2001 From: Devdeep Singh Date: Tue, 17 Dec 2013 14:04:05 +0530 Subject: [PATCH 015/312] CLOUDSTACK-5489: Query the user name and add it to the command while mounting a cifs share. 
--- .../cloud/hypervisor/hyperv/manager/HypervManagerImpl.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/manager/HypervManagerImpl.java b/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/manager/HypervManagerImpl.java index e6d8e1ad4fe..a30eb7df005 100644 --- a/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/manager/HypervManagerImpl.java +++ b/plugins/hypervisors/hyperv/src/com/cloud/hypervisor/hyperv/manager/HypervManagerImpl.java @@ -230,11 +230,15 @@ public class HypervManagerImpl implements HypervManager { Script script = null; String result = null; if (scheme.equals("cifs")) { + String user = System.getProperty("user.name"); Script command = new Script(true, "mount", _timeout, s_logger); command.add("-t", "cifs"); command.add(path); command.add(mountPoint); - command.add("-o", "uid=`whoami`,gid=`whoami`"); + + if (user != null) { + command.add("-o", "uid=" + user + ",gid=" + user); + } if (query != null) { query = query.replace('&', ','); From 025335a3ab3d7ddc95759c3099b4e17c03a59140 Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Tue, 17 Dec 2013 14:39:58 +0530 Subject: [PATCH 016/312] Fixing compile error found by checkstyle --- .../src/com/cloud/hypervisor/vmware/util/VmwareClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java index d7aaabcae1c..3c2c81d8a5e 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareClient.java @@ -325,7 +325,7 @@ public class VmwareClient { // info has a property - state for state of the task Object[] result = waitForValues(task, new String[] { "info.state", "info.error" }, new String[] { "state" }, new Object[][] { new Object[] { TaskInfoState.SUCCESS, TaskInfoState.ERROR } 
}); - + if (result[0].equals(TaskInfoState.SUCCESS)) { retVal = true; } From 3604f87c8d4a55a11d103b3b040bb3c36af33657 Mon Sep 17 00:00:00 2001 From: Devdeep Singh Date: Tue, 17 Dec 2013 22:53:12 +0530 Subject: [PATCH 017/312] CLOUDSTACK-5217: Attach volume fails with NPE. Made changes to make sure the hypervisor product version is reported on startup. Also made changes to fix attach and detach volume on scsi controller. --- .../HypervResource/CloudStackTypes.cs | 4 + .../HypervResourceController.cs | 53 +++++-- .../HypervResource/IWmiCallsV2.cs | 3 +- .../HypervResource/WmiCallsV2.cs | 137 ++++++++++++++---- 4 files changed, 158 insertions(+), 39 deletions(-) diff --git a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/CloudStackTypes.cs b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/CloudStackTypes.cs index f4d25ecfb6c..eb20d7fd082 100644 --- a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/CloudStackTypes.cs +++ b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/CloudStackTypes.cs @@ -406,7 +406,9 @@ namespace HypervResource public class DiskTO { public string type; + public string diskSequence = null; public TemplateObjectTO templateObjectTO = null; + public VolumeObjectTO volumeObjectTO = null; public static DiskTO ParseJson(dynamic json) { @@ -416,7 +418,9 @@ namespace HypervResource result = new DiskTO() { templateObjectTO = TemplateObjectTO.ParseJson(json.data), + volumeObjectTO = VolumeObjectTO.ParseJson(json.data), type = (string)json.type, + diskSequence = json.diskSeq }; } diff --git a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/HypervResourceController.cs b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/HypervResourceController.cs index b283d976c51..51a27bfdcd2 100644 --- a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/HypervResourceController.cs +++ 
b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/HypervResourceController.cs @@ -215,18 +215,29 @@ namespace HypervResource { string vmName = (string)cmd.vmName; DiskTO disk = DiskTO.ParseJson(cmd.disk); - TemplateObjectTO dataStore = disk.templateObjectTO; - if (dataStore.nfsDataStoreTO != null) + if (disk.type.Equals("ISO")) { + TemplateObjectTO dataStore = disk.templateObjectTO; NFSTO share = dataStore.nfsDataStoreTO; Utils.ConnectToRemote(share.UncPath, share.Domain, share.User, share.Password); - - // The share is mapped, now attach the iso - string isoPath = Utils.NormalizePath(Path.Combine(share.UncPath, dataStore.path)); - wmiCallsV2.AttachIso(vmName, isoPath); + string diskPath = Utils.NormalizePath(Path.Combine(share.UncPath, dataStore.path)); + wmiCallsV2.AttachIso(vmName, diskPath); result = true; } + else if (disk.type.Equals("DATADISK")) + { + VolumeObjectTO volume = disk.volumeObjectTO; + PrimaryDataStoreTO primary = volume.primaryDataStore; + Utils.ConnectToRemote(primary.UncPath, primary.Domain, primary.User, primary.Password); + string diskPath = Utils.NormalizePath(volume.FullFileName); + wmiCallsV2.AttachDisk(vmName, diskPath, disk.diskSequence); + result = true; + } + else + { + details = "Invalid disk type to be attached to vm " + vmName; + } } catch (Exception sysEx) { @@ -238,6 +249,7 @@ namespace HypervResource { result = result, details = details, + disk = cmd.disk, contextMap = contextMap }; @@ -261,16 +273,27 @@ namespace HypervResource { string vmName = (string)cmd.vmName; DiskTO disk = DiskTO.ParseJson(cmd.disk); - TemplateObjectTO dataStore = disk.templateObjectTO; - if (dataStore.nfsDataStoreTO != null) + if (disk.type.Equals("ISO")) { + TemplateObjectTO dataStore = disk.templateObjectTO; NFSTO share = dataStore.nfsDataStoreTO; - // The share is mapped, now attach the iso - string isoPath = Utils.NormalizePath(Path.Combine(share.UncPath, dataStore.path)); - wmiCallsV2.DetachDisk(vmName, isoPath); + string diskPath = 
Utils.NormalizePath(Path.Combine(share.UncPath, dataStore.path)); + wmiCallsV2.DetachDisk(vmName, diskPath); result = true; } + else if (disk.type.Equals("DATADISK")) + { + VolumeObjectTO volume = disk.volumeObjectTO; + PrimaryDataStoreTO primary = volume.primaryDataStore; + string diskPath = Utils.NormalizePath(volume.FullFileName); + wmiCallsV2.DetachDisk(vmName, diskPath); + result = true; + } + else + { + details = "Invalid disk type to be dettached from vm " + vmName; + } } catch (Exception sysEx) { @@ -1649,6 +1672,14 @@ namespace HypervResource strtRouteCmd.hypervisorVersion = System.Environment.OSVersion.Version.ToString(); strtRouteCmd.caps = "hvm"; + dynamic details = strtRouteCmd.hostDetails; + if (details != null) + { + string productVersion = System.Environment.OSVersion.Version.Major.ToString() + "." + + System.Environment.OSVersion.Version.Minor.ToString(); + details.Add("product_version", productVersion); + } + // Detect CPUs, speed, memory uint cores; uint mhz; diff --git a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/IWmiCallsV2.cs b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/IWmiCallsV2.cs index 99ce35276b7..35cdec00373 100644 --- a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/IWmiCallsV2.cs +++ b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/IWmiCallsV2.cs @@ -27,9 +27,10 @@ namespace HypervResource { public interface IWmiCallsV2 { - System.Management.ManagementPath AddDiskDriveToVm(ComputerSystem vm, string vhdfile, string cntrllerAddr, string driveResourceType); + System.Management.ManagementPath AddDiskDriveToIdeController(ComputerSystem vm, string vhdfile, string cntrllerAddr, string driveResourceType); ComputerSystem AddUserData(ComputerSystem vm, string userData); void AttachIso(string displayName, string iso); + void AttachDisk(string vmName, string diskPath, string addressOnController); void CreateDynamicVirtualHardDisk(ulong MaxInternalSize, string Path); 
SyntheticEthernetPortSettingData CreateNICforVm(ComputerSystem vm, string mac); ComputerSystem CreateVM(string name, long memory_mb, int vcpus); diff --git a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/WmiCallsV2.cs b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/WmiCallsV2.cs index c89f837a7a6..b7ae6656bca 100644 --- a/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/WmiCallsV2.cs +++ b/plugins/hypervisors/hyperv/DotNet/ServerResource/HypervResource/WmiCallsV2.cs @@ -207,14 +207,14 @@ namespace HypervResource return new SyntheticEthernetPortSettingData(newResourcePaths[0]); } - public const string IDE_HARDDISK_CONTROLLER = "Microsoft:Hyper-V:Emulated IDE Controller"; + public const string IDE_CONTROLLER = "Microsoft:Hyper-V:Emulated IDE Controller"; public const string SCSI_CONTROLLER = "Microsoft:Hyper-V:Synthetic SCSI Controller"; - public const string IDE_HARDDISK_DRIVE = "Microsoft:Hyper-V:Synthetic Disk Drive"; - public const string IDE_ISO_DRIVE = "Microsoft:Hyper-V:Synthetic DVD Drive"; + public const string HARDDISK_DRIVE = "Microsoft:Hyper-V:Synthetic Disk Drive"; + public const string ISO_DRIVE = "Microsoft:Hyper-V:Synthetic DVD Drive"; // TODO: names harvested from Msvm_ResourcePool, not clear how to create new instances - public const string IDE_ISO_DISK = "Microsoft:Hyper-V:Virtual CD/DVD Disk"; // For IDE_ISO_DRIVE - public const string IDE_HARDDISK_DISK = "Microsoft:Hyper-V:Virtual Hard Disk"; // For IDE_HARDDISK_DRIVE + public const string ISO_DISK = "Microsoft:Hyper-V:Virtual CD/DVD Disk"; // For IDE_ISO_DRIVE + public const string HARDDISK_DISK = "Microsoft:Hyper-V:Virtual Hard Disk"; // For IDE_HARDDISK_DRIVE /// /// Create new VM. By default we start it. @@ -280,7 +280,7 @@ namespace HypervResource var newVm = CreateVM(vmName, memSize, vcpus); // Add a SCSI controller for attaching/detaching data volumes. 
- AddScsiControllerToVm(newVm); + AddScsiController(newVm); foreach (var diskDrive in diskDrives) { @@ -345,11 +345,11 @@ namespace HypervResource switch (driveType) { case "ROOT": ideCtrllr = "0"; - driveResourceType = IDE_HARDDISK_DRIVE; + driveResourceType = HARDDISK_DRIVE; break; case "ISO": ideCtrllr = "1"; - driveResourceType = IDE_ISO_DRIVE; + driveResourceType = ISO_DRIVE; break; default: // TODO: double check exception type @@ -362,7 +362,7 @@ namespace HypervResource } logger.DebugFormat("Create disk type {1} (Named: {0}), on vm {2} {3}", diskName, driveResourceType, vmName, string.IsNullOrEmpty(vhdFile) ? " no disk to insert" : ", inserting disk" +vhdFile ); - AddDiskDriveToVm(newVm, vhdFile, ideCtrllr, driveResourceType); + AddDiskDriveToIdeController(newVm, vhdFile, ideCtrllr, driveResourceType); if (isoPath != null) { AttachIso(vmName, isoPath); @@ -566,16 +566,32 @@ namespace HypervResource public void patchSystemVmIso(String vmName, String systemVmIso) { ComputerSystem vmObject = GetComputerSystem(vmName); - AddDiskDriveToVm(vmObject, "", "1", IDE_ISO_DRIVE); + AddDiskDriveToIdeController(vmObject, "", "1", ISO_DRIVE); AttachIso(vmName, systemVmIso); } + public void AttachDisk(string vmName, string diskPath, string addressOnController) + { + logger.DebugFormat("Got request to attach disk {0} to vm {1}", diskPath, vmName); + + ComputerSystem vm = GetComputerSystem(vmName); + if (vm == null) + { + logger.DebugFormat("VM {0} not found", vmName); + return; + } + else + { + ManagementPath newDrivePath = AttachDiskDriveToScsiController(vm, addressOnController); + InsertDiskImage(vm, diskPath, HARDDISK_DISK, newDrivePath); + } + } /// /// /// /// IDE_HARDDISK_DRIVE or IDE_ISO_DRIVE - public ManagementPath AddDiskDriveToVm(ComputerSystem vm, string vhdfile, string cntrllerAddr, string driveResourceType) + public ManagementPath AddDiskDriveToIdeController(ComputerSystem vm, string vhdfile, string cntrllerAddr, string driveResourceType) { 
logger.DebugFormat("Creating DISK for VM {0} (GUID {1}) by attaching {2}", vm.ElementName, @@ -585,11 +601,11 @@ namespace HypervResource // Determine disk type for drive and assert drive type valid string diskResourceSubType = null; switch(driveResourceType) { - case IDE_HARDDISK_DRIVE: - diskResourceSubType = IDE_HARDDISK_DISK; + case HARDDISK_DRIVE: + diskResourceSubType = HARDDISK_DISK; break; - case IDE_ISO_DRIVE: - diskResourceSubType = IDE_ISO_DISK; + case ISO_DRIVE: + diskResourceSubType = ISO_DISK; break; default: var errMsg = string.Format( @@ -602,7 +618,7 @@ namespace HypervResource throw ex; } - ManagementPath newDrivePath = AttachNewDriveToVm(vm, cntrllerAddr, driveResourceType); + ManagementPath newDrivePath = AttachNewDrive(vm, cntrllerAddr, driveResourceType); // If there's not disk to insert, we are done. if (String.IsNullOrEmpty(vhdfile)) @@ -629,7 +645,7 @@ namespace HypervResource } else { - RemoveStorageImageFromVm(vm, diskFileName); + RemoveStorageImage(vm, diskFileName); } } @@ -638,7 +654,7 @@ namespace HypervResource /// /// /// - private void RemoveStorageImageFromVm(ComputerSystem vm, string diskFileName) + private void RemoveStorageImage(ComputerSystem vm, string diskFileName) { // Obtain StorageAllocationSettingData for disk StorageAllocationSettingData.StorageAllocationSettingDataCollection storageSettingsObjs = StorageAllocationSettingData.GetInstances(); @@ -674,13 +690,13 @@ namespace HypervResource RemoveStorageResource(imageToRemove.Path, vm); - logger.InfoFormat("REmoved disk image {0} from VM {1} (GUID {2}): the disk image is not attached.", + logger.InfoFormat("Removed disk image {0} from VM {1} (GUID {2}): the disk image is not attached.", diskFileName, vm.ElementName, vm.Name); } - private ManagementPath AttachNewDriveToVm(ComputerSystem vm, string cntrllerAddr, string driveType) + private ManagementPath AttachNewDrive(ComputerSystem vm, string cntrllerAddr, string driveType) { // Disk drives are attached to a 'Parent' IDE 
controller. We IDE Controller's settings for the 'Path', which our new Disk drive will use to reference it. VirtualSystemSettingData vmSettings = GetVmSettings(vm); @@ -722,7 +738,7 @@ namespace HypervResource return newDrivePaths[0]; } - private ManagementPath AddScsiControllerToVm(ComputerSystem vm) + private ManagementPath AddScsiController(ComputerSystem vm) { // A description of the controller is created by modifying a clone of the default ResourceAllocationSettingData for scsi controller string scsiQuery = String.Format("ResourceSubType LIKE \"{0}\" AND InstanceID LIKE \"%Default\"", SCSI_CONTROLLER); @@ -754,6 +770,55 @@ namespace HypervResource return newResourcePaths[0]; } + private ManagementPath GetDiskDriveToScsiController(ComputerSystem vm, string addrOnController) + { + VirtualSystemSettingData vmSettings = GetVmSettings(vm); + var ctrller = GetScsiControllerSettings(vmSettings); + return null; + } + + private ManagementPath AttachDiskDriveToScsiController(ComputerSystem vm, string addrOnController) + { + // Disk drives are attached to a 'Parent' Scsi controller. 
+ VirtualSystemSettingData vmSettings = GetVmSettings(vm); + var ctrller = GetScsiControllerSettings(vmSettings); + + // A description of the drive is created by modifying a clone of the default ResourceAllocationSettingData for that drive type + string defaultDriveQuery = String.Format("ResourceSubType LIKE \"{0}\" AND InstanceID LIKE \"%Default\"", HARDDISK_DRIVE); + var newDiskDriveSettings = CloneResourceAllocationSetting(defaultDriveQuery); + + // Set IDE controller and address on the controller for the new drive + newDiskDriveSettings.LateBoundObject["Parent"] = ctrller.Path.ToString(); + newDiskDriveSettings.LateBoundObject["AddressOnParent"] = addrOnController; + newDiskDriveSettings.CommitObject(); + + // Add this new disk drive to the VM + logger.DebugFormat("Creating disk drive type {0}, parent IDE controller is {1} and address on controller is {2}", + newDiskDriveSettings.ResourceSubType, + newDiskDriveSettings.Parent, + newDiskDriveSettings.AddressOnParent); + string[] newDriveResource = new string[] { newDiskDriveSettings.LateBoundObject.GetText(System.Management.TextFormat.CimDtd20) }; + ManagementPath[] newDrivePaths = AddVirtualResource(newDriveResource, vm); + + // assert + if (newDrivePaths.Length != 1) + { + var errMsg = string.Format( + "Failed to add disk drive type {3} to VM {0} (GUID {1}): number of resource created {2}", + vm.ElementName, + vm.Name, + newDrivePaths.Length, + HARDDISK_DRIVE); + var ex = new WmiException(errMsg); + logger.Error(errMsg, ex); + throw ex; + } + logger.DebugFormat("New disk drive type {0} WMI path is {1}s", + newDiskDriveSettings.ResourceSubType, + newDrivePaths[0].Path); + return newDrivePaths[0]; + } + private void InsertDiskImage(ComputerSystem vm, string diskImagePath, string diskResourceSubType, ManagementPath drivePath) { @@ -795,16 +860,14 @@ namespace HypervResource /// Create Msvm_StorageAllocationSettingData corresponding to the ISO image, and /// associate this with the VM's DVD drive. 
/// - private void AttachIsoToVm(ComputerSystem vm, string isoPath) + private void AttachIso(ComputerSystem vm, string isoPath) { // Disk drives are attached to a 'Parent' IDE controller. We IDE Controller's settings for the 'Path', which our new Disk drive will use to reference it. VirtualSystemSettingData vmSettings = GetVmSettings(vm); var driveWmiObj = GetDvdDriveSettings(vmSettings); - InsertDiskImage(vm, isoPath, IDE_ISO_DISK, driveWmiObj.Path); + InsertDiskImage(vm, isoPath, ISO_DISK, driveWmiObj.Path); } - - private static ResourceAllocationSettingData CloneResourceAllocationSetting(string wmiQuery) { var defaultDiskDriveSettingsObjs = ResourceAllocationSettingData.GetInstances(wmiQuery); @@ -834,7 +897,7 @@ namespace HypervResource } else { - AttachIsoToVm(vm, iso); + AttachIso(vm, iso); } } @@ -1827,7 +1890,7 @@ namespace HypervResource foreach (ResourceAllocationSettingData wmiObj in wmiObjCollection) { - if (wmiObj.ResourceSubType == IDE_HARDDISK_CONTROLLER && wmiObj.Address == cntrllerAddr) + if (wmiObj.ResourceSubType == IDE_CONTROLLER && wmiObj.Address == cntrllerAddr) { return wmiObj; } @@ -1842,6 +1905,26 @@ namespace HypervResource throw ex; } + public ResourceAllocationSettingData GetScsiControllerSettings(VirtualSystemSettingData vmSettings) + { + var wmiObjCollection = GetResourceAllocationSettings(vmSettings); + + foreach (ResourceAllocationSettingData wmiObj in wmiObjCollection) + { + if (wmiObj.ResourceSubType == SCSI_CONTROLLER) + { + return wmiObj; + } + } + + var errMsg = string.Format( + "Cannot find the Microsoft Synthetic SCSI Controller in VirtualSystemSettingData {1}", + vmSettings.Path.Path); + var ex = new WmiException(errMsg); + logger.Error(errMsg, ex); + throw ex; + } + /// /// VM resources, typically hardware a described by a generic MSVM_ResourceAllocationSettingData object. 
The hardware type being /// described is identified in two ways: in general terms using an enum in the ResourceType field, and in terms of the implementation From fa2f18d2a208dca070fb831ecaf9bb8aec372a26 Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Tue, 17 Dec 2013 18:29:48 +0530 Subject: [PATCH 018/312] CLOUDSTACK-4875. Vmware vCenter 5.5 - System VM deployment fails During VM deployment. When base template is being cloned to create VM ROOT disk, get the disk path i.e. base file name of the VM's ROOT disk from vCenter --- .../resource/VmwareStorageProcessor.java | 37 +++++++------------ .../vmware/mo/VirtualMachineMO.java | 16 ++++++++ 2 files changed, 30 insertions(+), 23 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index 7c3e73a0a85..b55dbec9dae 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -253,16 +253,6 @@ public class VmwareStorageProcessor implements StorageProcessor { s_logger.error(msg); throw new Exception(msg); } - - s_logger.info("Move volume out of volume-wrapper VM "); - String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, true); - String[] legacyCloudStackLayoutFilePair = - VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true); - - dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); - - dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], dcMo.getMor(), true); - return true; } @@ -275,15 +265,6 @@ public class VmwareStorageProcessor 
implements StorageProcessor { s_logger.error(msg); throw new Exception(msg); } - - s_logger.info("Move volume out of volume-wrapper VM "); - String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, false); - String[] legacyCloudStackLayoutFilePair = - VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false); - - dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); - - dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], dcMo.getMor(), true); return true; } @@ -309,6 +290,7 @@ public class VmwareStorageProcessor implements StorageProcessor { DatastoreMO dsMo = new DatastoreMO(context, morDatastore); String vmdkName = volume.getName(); + String vmdkFileBaseName = null; if (srcStore == null) { // create a root volume for blank VM (created from ISO) String dummyVmName = this.hostService.getWorkerName(context, cmd, 0); @@ -319,8 +301,9 @@ public class VmwareStorageProcessor implements StorageProcessor { throw new Exception("Unable to create a dummy VM for volume creation"); } - String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true // we only use the first file in the pair, linked or not will not matter - ); + vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); + // we only use the first file in the pair, linked or not will not matter + String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true); String volumeDatastorePath = vmdkFilePair[0]; synchronized (this) { s_logger.info("Delete file if exists in datastore to clear the way for creating the volume. 
file: " + volumeDatastorePath); @@ -353,6 +336,14 @@ public class VmwareStorageProcessor implements StorageProcessor { vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(vmdkName); assert (vmMo != null); + vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); // TO-DO: Support for base template containing multiple disks + s_logger.info("Move volume out of volume-wrapper VM "); + String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.VMWARE, !_fullCloneFlag); + String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag); + + dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); + dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], dcMo.getMor(), true); + s_logger.info("detach disks from volume-wrapper VM " + vmdkName); vmMo.detachAllDisks(); @@ -365,11 +356,11 @@ public class VmwareStorageProcessor implements StorageProcessor { // restoreVM - move the new ROOT disk into corresponding VM folder String vmInternalCSName = volume.getVmName(); if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmInternalCSName)) { - VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmInternalCSName, dsMo, vmdkName); + VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmInternalCSName, dsMo, vmdkFileBaseName); } VolumeObjectTO newVol = new VolumeObjectTO(); - newVol.setPath(vmdkName); + newVol.setPath(vmdkFileBaseName); newVol.setSize(volume.getSize()); return new CopyCmdAnswer(newVol); } catch (Throwable e) { diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index 771c4a568cf..b23b0ff6d07 
100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -1737,6 +1737,22 @@ public class VirtualMachineMO extends BaseMO { } } + public List getVmdkFileBaseNames() throws Exception { + List vmdkFileBaseNames = new ArrayList(); + VirtualDevice[] devices = getAllDiskDevice(); + for(VirtualDevice device : devices) { + if(device instanceof VirtualDisk) { + VirtualDeviceBackingInfo backingInfo = ((VirtualDisk)device).getBacking(); + if(backingInfo instanceof VirtualDiskFlatVer2BackingInfo) { + VirtualDiskFlatVer2BackingInfo diskBackingInfo = (VirtualDiskFlatVer2BackingInfo)backingInfo; + DatastoreFile dsBackingFile = new DatastoreFile(diskBackingInfo.getFileName()); + vmdkFileBaseNames.add(dsBackingFile.getFileBaseName()); + } + } + } + return vmdkFileBaseNames; + } + // this method relies on un-offical VMware API @Deprecated public void moveAllVmDiskFiles(DatastoreMO destDsMo, String destDsDir, boolean followDiskChain) throws Exception { From 7e4407d3dddc1117a1dc150761883e04f044494b Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Tue, 17 Dec 2013 11:14:36 -0800 Subject: [PATCH 019/312] CLOUDSTACK-5532: Tag UI: Truncate long tag key/value --- ui/css/cloudstack3.css | 31 ++++++++++++++++++++++++++++++- ui/scripts/ui/widgets/tagger.js | 6 +++++- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 4fb9098ac26..b1811f1ff18 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -10296,6 +10296,11 @@ div.container div.panel div#details-tab-addloadBalancer.detail-group div.loadBal color: #000000; } +.tagger ul li span.label span.value { + max-width: 100px; + overflow: hidden; +} + .tagger ul li span.label { font-size: 10px; position: relative; @@ -10307,11 +10312,35 @@ div.container div.panel div#details-tab-addloadBalancer.detail-group div.loadBal left: 6px; } +.tagger ul li span.label > span { 
+ float: left; + display: block; + margin-top: 2px; +} + +.tagger ul li span.label > span.key { + font-weight: bold; + max-width: 134px; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + margin-left: 15px; + margin-right: 5px; +} + +.tagger ul li span.label > span.value { + max-width: 160px; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + margin-left: 6px; +} + .tagger ul li span.remove { width: 15px !important; overflow: hidden !important; height: 11px !important; - background: #DFDFDF url(../images/sprites.png) no-repeat -596px -1183px; + background: #DFDFDF; display: block; top: 0px !important; left: -3px !important; diff --git a/ui/scripts/ui/widgets/tagger.js b/ui/scripts/ui/widgets/tagger.js index 4a02d8ba804..673bd43ce6a 100644 --- a/ui/scripts/ui/widgets/tagger.js +++ b/ui/scripts/ui/widgets/tagger.js @@ -95,9 +95,13 @@ }, tagItem: function(title, onRemove, data) { var $li = $('
  • '); - var $label = $('').addClass('label').html(_s(title)); + var $label = $('').addClass('label'); var $remove = $('').addClass('remove').html('X'); + var $key = $('').addClass('key').html(_s(data.key)); + var $value = $('').addClass('value').html(_s(data.value)); + $label.append($key, '=', $value); + $label.attr('title', title); $remove.click(function() { if (onRemove) onRemove($li, data); }); From 95364a40225460c03fcec2eed1cbb4bc5e4a3e56 Mon Sep 17 00:00:00 2001 From: Marcus Sorensen Date: Mon, 16 Dec 2013 14:32:38 -0700 Subject: [PATCH 020/312] CLOUDSTACK-5531 Initial support for vhd, raw, vmdk image formats on KVM. Tested all formats with local and CLVM. --- api/src/com/cloud/storage/Storage.java | 2 + .../cloud/storage/template/OVAProcessor.java | 158 ++++++++++++++++++ .../cloud/storage/template/VmdkProcessor.java | 101 ++++------- .../kvm/storage/KVMStorageProcessor.java | 1 + .../kvm/storage/LibvirtStorageAdaptor.java | 4 +- .../apache/cloudstack/utils/qemu/QemuImg.java | 7 +- .../manager/VmwareStorageManagerImpl.java | 10 +- .../resource/VmwareStorageProcessor.java | 10 +- .../template/HypervisorTemplateAdapter.java | 57 +++++-- .../resource/NfsSecondaryStorageResource.java | 16 +- .../storage/template/DownloadManagerImpl.java | 13 +- ui/scripts/templates.js | 12 ++ 12 files changed, 292 insertions(+), 99 deletions(-) create mode 100644 core/src/com/cloud/storage/template/OVAProcessor.java diff --git a/api/src/com/cloud/storage/Storage.java b/api/src/com/cloud/storage/Storage.java index f1868a7a1b3..2175c9b1a0c 100755 --- a/api/src/com/cloud/storage/Storage.java +++ b/api/src/com/cloud/storage/Storage.java @@ -28,6 +28,8 @@ public class Storage { OVA(true, true, true, "ova"), VHDX(true, true, true, "vhdx"), BAREMETAL(false, false, false, "BAREMETAL"), + VMDK(true, true, false, "vmdk"), + VDI(true, true, false, "vdi"), TAR(false, false, false, "tar"); private final boolean thinProvisioned; diff --git 
a/core/src/com/cloud/storage/template/OVAProcessor.java b/core/src/com/cloud/storage/template/OVAProcessor.java new file mode 100644 index 00000000000..0db3bb00e0a --- /dev/null +++ b/core/src/com/cloud/storage/template/OVAProcessor.java @@ -0,0 +1,158 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.storage.template; + +import java.io.File; +import java.util.Map; + +import javax.ejb.Local; +import javax.naming.ConfigurationException; +import javax.xml.parsers.DocumentBuilderFactory; + +import org.apache.log4j.Logger; +import org.w3c.dom.Document; +import org.w3c.dom.Element; + +import com.cloud.exception.InternalErrorException; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.StorageLayer; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.script.Script; + +@Local(value = Processor.class) +public class OVAProcessor extends AdapterBase implements Processor { + private static final Logger s_logger = Logger.getLogger(OVAProcessor.class); + + StorageLayer _storage; + + @Override + public FormatInfo process(String templatePath, ImageFormat format, String templateName) throws InternalErrorException { + if (format != null) { + if (s_logger.isInfoEnabled()) { + s_logger.info("We currently don't handle conversion from " + format + " to OVA."); + } + return null; + } + + s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); + String templateFilePath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension(); + if (!_storage.exists(templateFilePath)) { + if (s_logger.isInfoEnabled()) { + s_logger.info("Unable to find the vmware template file: " + templateFilePath); + } + return null; + } + + s_logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName); + String templateFileFullPath = templatePath + File.separator + templateName + "." 
+ ImageFormat.OVA.getFileExtension(); + File templateFile = new File(templateFileFullPath); + + Script command = new Script("tar", 0, s_logger); + command.add("--no-same-owner"); + command.add("-xf", templateFileFullPath); + command.setWorkDir(templateFile.getParent()); + String result = command.execute(); + if (result != null) { + s_logger.info("failed to untar OVA package due to " + result + ". templatePath: " + templatePath + ", templateName: " + templateName); + return null; + } + + FormatInfo info = new FormatInfo(); + info.format = ImageFormat.OVA; + info.filename = templateName + "." + ImageFormat.OVA.getFileExtension(); + info.size = _storage.getSize(templateFilePath); + info.virtualSize = getTemplateVirtualSize(templatePath, info.filename); + + // delete original OVA file + // templateFile.delete(); + return info; + } + + @Override + public Long getVirtualSize(File file) { + try { + long size = getTemplateVirtualSize(file.getParent(), file.getName()); + return size; + } catch (Exception e) { + + } + return file.length(); + } + + public long getTemplateVirtualSize(String templatePath, String templateName) throws InternalErrorException { + // get the virtual size from the OVF file meta data + long virtualSize = 0; + String templateFileFullPath = templatePath.endsWith(File.separator) ? templatePath : templatePath + File.separator; + templateFileFullPath += templateName.endsWith(ImageFormat.OVA.getFileExtension()) ? templateName : templateName + "." 
+ ImageFormat.OVA.getFileExtension(); + String ovfFileName = getOVFFilePath(templateFileFullPath); + if (ovfFileName == null) { + String msg = "Unable to locate OVF file in template package directory: " + templatePath; + s_logger.error(msg); + throw new InternalErrorException(msg); + } + try { + Document ovfDoc = null; + ovfDoc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFileName)); + Element disk = (Element)ovfDoc.getElementsByTagName("Disk").item(0); + virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity")); + String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); + if ((virtualSize != 0) && (allocationUnits != null)) { + long units = 1; + if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) { + units = 1024; + } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) { + units = 1024 * 1024; + } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) { + units = 1024 * 1024 * 1024; + } + virtualSize = virtualSize * units; + } else { + throw new InternalErrorException("Failed to read capacity and capacityAllocationUnits from the OVF file: " + ovfFileName); + } + return virtualSize; + } catch (Exception e) { + String msg = "Unable to parse OVF XML document to get the virtual disk size due to" + e; + s_logger.error(msg); + throw new InternalErrorException(msg); + } + } + + private String getOVFFilePath(String srcOVAFileName) { + File file = new File(srcOVAFileName); + assert (_storage != null); + String[] files = _storage.listFiles(file.getParent()); + if (files != null) { + for (String fileName : files) { + if (fileName.toLowerCase().endsWith(".ovf")) { + File ovfFile = new File(fileName); + return file.getParent() + File.separator 
+ ovfFile.getName(); + } + } + } + return null; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + _storage = (StorageLayer)params.get(StorageLayer.InstanceConfigKey); + if (_storage == null) { + throw new ConfigurationException("Unable to get storage implementation"); + } + + return true; + } +} diff --git a/core/src/com/cloud/storage/template/VmdkProcessor.java b/core/src/com/cloud/storage/template/VmdkProcessor.java index be201438a1c..2c08447e9f9 100644 --- a/core/src/com/cloud/storage/template/VmdkProcessor.java +++ b/core/src/com/cloud/storage/template/VmdkProcessor.java @@ -16,22 +16,24 @@ // under the License. package com.cloud.storage.template; +import java.io.BufferedReader; import java.io.File; +import java.io.FileReader; +import java.io.FileNotFoundException; +import java.io.IOException; import java.util.Map; +import java.util.regex.Pattern; +import java.util.regex.Matcher; import javax.ejb.Local; import javax.naming.ConfigurationException; -import javax.xml.parsers.DocumentBuilderFactory; import org.apache.log4j.Logger; -import org.w3c.dom.Document; -import org.w3c.dom.Element; import com.cloud.exception.InternalErrorException; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.script.Script; @Local(value = Processor.class) public class VmdkProcessor extends AdapterBase implements Processor { @@ -49,7 +51,7 @@ public class VmdkProcessor extends AdapterBase implements Processor { } s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); - String templateFilePath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension(); + String templateFilePath = templatePath + File.separator + templateName + "." 
+ ImageFormat.VMDK.getFileExtension(); if (!_storage.exists(templateFilePath)) { if (s_logger.isInfoEnabled()) { s_logger.info("Unable to find the vmware template file: " + templateFilePath); @@ -57,28 +59,12 @@ public class VmdkProcessor extends AdapterBase implements Processor { return null; } - s_logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName); - String templateFileFullPath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension(); - File templateFile = new File(templateFileFullPath); - - Script command = new Script("tar", 0, s_logger); - command.add("--no-same-owner"); - command.add("-xf", templateFileFullPath); - command.setWorkDir(templateFile.getParent()); - String result = command.execute(); - if (result != null) { - s_logger.info("failed to untar OVA package due to " + result + ". templatePath: " + templatePath + ", templateName: " + templateName); - return null; - } - FormatInfo info = new FormatInfo(); - info.format = ImageFormat.OVA; - info.filename = templateName + "." + ImageFormat.OVA.getFileExtension(); + info.format = ImageFormat.VMDK; + info.filename = templateName + "." + ImageFormat.VMDK.getFileExtension(); info.size = _storage.getSize(templateFilePath); info.virtualSize = getTemplateVirtualSize(templatePath, info.filename); - // delete original OVA file - // templateFile.delete(); return info; } @@ -94,56 +80,37 @@ public class VmdkProcessor extends AdapterBase implements Processor { } public long getTemplateVirtualSize(String templatePath, String templateName) throws InternalErrorException { - // get the virtual size from the OVF file meta data long virtualSize = 0; String templateFileFullPath = templatePath.endsWith(File.separator) ? templatePath : templatePath + File.separator; - templateFileFullPath += templateName.endsWith(ImageFormat.OVA.getFileExtension()) ? templateName : templateName + "." 
+ ImageFormat.OVA.getFileExtension(); - String ovfFileName = getOVFFilePath(templateFileFullPath); - if (ovfFileName == null) { - String msg = "Unable to locate OVF file in template package directory: " + templatePath; - s_logger.error(msg); - throw new InternalErrorException(msg); - } - try { - Document ovfDoc = null; - ovfDoc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFileName)); - Element disk = (Element)ovfDoc.getElementsByTagName("Disk").item(0); - virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity")); - String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits"); - if ((virtualSize != 0) && (allocationUnits != null)) { - long units = 1; - if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) { - units = 1024; - } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) { - units = 1024 * 1024; - } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) { - units = 1024 * 1024 * 1024; - } - virtualSize = virtualSize * units; - } else { - throw new InternalErrorException("Failed to read capacity and capacityAllocationUnits from the OVF file: " + ovfFileName); - } - return virtualSize; - } catch (Exception e) { - String msg = "Unable to parse OVF XML document to get the virtual disk size due to" + e; - s_logger.error(msg); - throw new InternalErrorException(msg); - } - } + templateFileFullPath += templateName.endsWith(ImageFormat.VMDK.getFileExtension()) ? templateName : templateName + "." 
+ ImageFormat.VMDK.getFileExtension(); + String vmdkHeader = ""; - private String getOVFFilePath(String srcOVAFileName) { - File file = new File(srcOVAFileName); - assert (_storage != null); - String[] files = _storage.listFiles(file.getParent()); - if (files != null) { - for (String fileName : files) { - if (fileName.toLowerCase().endsWith(".ovf")) { - File ovfFile = new File(fileName); - return file.getParent() + File.separator + ovfFile.getName(); + try { + FileReader fileReader = new FileReader(templateFileFullPath); + BufferedReader bufferedReader = new BufferedReader(fileReader); + Pattern regex = Pattern.compile("(RW|RDONLY|NOACCESS) (\\d+) (FLAT|SPARSE|ZERO|VMFS|VMFSSPARSE|VMFSDRM|VMFSRAW)"); + String line = null; + while((line = bufferedReader.readLine()) != null) { + Matcher m = regex.matcher(line); + if (m.find( )) { + long sectors = Long.parseLong(m.group(2)); + virtualSize = sectors * 512; + break; } } + bufferedReader.close(); + } catch(FileNotFoundException ex) { + String msg = "Unable to open file '" + templateFileFullPath + "' " + ex.toString(); + s_logger.error(msg); + throw new InternalErrorException(msg); + } catch(IOException ex) { + String msg = "Unable read open file '" + templateFileFullPath + "' " + ex.toString(); + s_logger.error(msg); + throw new InternalErrorException(msg); } - return null; + + s_logger.debug("vmdk file had size="+virtualSize); + return virtualSize; } @Override diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index d854ca57e38..7d5d3351220 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -189,6 +189,7 @@ public class KVMStorageProcessor implements StorageProcessor { } /* Copy volume to primary storage */ + s_logger.debug("Copying template to 
primary storage, template format is " + tmplVol.getFormat() ); KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); KVMPhysicalDisk primaryVol = null; diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index 82d0f8d41cf..f910813ec5c 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -978,6 +978,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { String sourcePath = disk.getPath(); KVMPhysicalDisk newDisk; + s_logger.debug("copyPhysicalDisk: disk size:" + disk.getSize() + ", virtualsize:" + disk.getVirtualSize()+" format:"+disk.getFormat()); if (destPool.getType() != StoragePoolType.RBD) { if (disk.getFormat() == PhysicalDiskFormat.TAR) { newDisk = destPool.createPhysicalDisk(name, PhysicalDiskFormat.DIR, disk.getVirtualSize()); @@ -1015,7 +1016,8 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { try { Map info = qemu.info(srcFile); String backingFile = info.get(new String("backing_file")); - if (sourceFormat.equals(destFormat) && backingFile == null) { + // qcow2 templates can just be copied into place + if (sourceFormat.equals(destFormat) && backingFile == null && sourcePath.endsWith(".qcow2")) { String result = Script.runSimpleBashScript("cp -f " + sourcePath + " " + destPath, timeout); if (result != null) { throw new CloudRuntimeException("Failed to create disk: " + result); diff --git a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java index 8c23f1ee802..a948ca1899f 100644 --- a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java +++ 
b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java @@ -184,8 +184,9 @@ public class QemuImg { public void convert(QemuImgFile srcFile, QemuImgFile destFile, Map options) throws QemuImgException { Script s = new Script(_qemuImgPath, timeout); s.add("convert"); - s.add("-f"); - s.add(srcFile.getFormat().toString()); + // autodetect source format. Sometime int he future we may teach KVMPhysicalDisk about more formats, then we can explicitly pass them if necessary + //s.add("-f"); + //s.add(srcFile.getFormat().toString()); s.add("-O"); s.add(destFile.getFormat().toString()); @@ -350,4 +351,4 @@ public class QemuImg { public void resize(QemuImgFile file, long size) throws QemuImgException { this.resize(file, size, false); } -} \ No newline at end of file +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index 040a4cfd8f3..cd3fe733828 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -80,7 +80,7 @@ import com.cloud.storage.JavaStorageLayer; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; import com.cloud.storage.Volume; -import com.cloud.storage.template.VmdkProcessor; +import com.cloud.storage.template.OVAProcessor; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; @@ -649,10 +649,10 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { clonedVm.exportVm(secondaryMountPoint + "/" + installPath, templateUniqueName, true, false); long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); - VmdkProcessor processor = new VmdkProcessor(); + OVAProcessor processor = new OVAProcessor(); Map 
params = new HashMap(); params.put(StorageLayer.InstanceConfigKey, _storage); - processor.configure("VMDK Processor", params); + processor.configure("OVA Processor", params); long virtualSize = processor.getTemplateVirtualSize(installFullPath, templateUniqueName); postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); @@ -771,11 +771,11 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } long physicalSize = new File(installFullPath + "/" + templateVMDKName).length(); - VmdkProcessor processor = new VmdkProcessor(); + OVAProcessor processor = new OVAProcessor(); // long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); Map params = new HashMap(); params.put(StorageLayer.InstanceConfigKey, _storage); - processor.configure("VMDK Processor", params); + processor.configure("OVA Processor", params); long virtualSize = processor.getTemplateVirtualSize(installFullPath, templateUniqueName); postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index b55dbec9dae..abb673328d3 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -78,7 +78,7 @@ import com.cloud.storage.JavaStorageLayer; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; import com.cloud.storage.Volume; -import com.cloud.storage.template.VmdkProcessor; +import com.cloud.storage.template.OVAProcessor; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.script.Script; @@ -617,10 +617,10 @@ public class VmwareStorageProcessor implements StorageProcessor { 
clonedVm.exportVm(secondaryMountPoint + "/" + installPath, templateUniqueName, true, false); long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); - VmdkProcessor processor = new VmdkProcessor(); + OVAProcessor processor = new OVAProcessor(); Map params = new HashMap(); params.put(StorageLayer.InstanceConfigKey, _storage); - processor.configure("VMDK Processor", params); + processor.configure("OVA Processor", params); long virtualSize = processor.getTemplateVirtualSize(installFullPath, templateUniqueName); postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); @@ -841,11 +841,11 @@ public class VmwareStorageProcessor implements StorageProcessor { } long physicalSize = new File(installFullPath + "/" + templateVMDKName).length(); - VmdkProcessor processor = new VmdkProcessor(); + OVAProcessor processor = new OVAProcessor(); // long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); Map params = new HashMap(); params.put(StorageLayer.InstanceConfigKey, _storage); - processor.configure("VMDK Processor", params); + processor.configure("OVA Processor", params); long virtualSize = processor.getTemplateVirtualSize(installFullPath, templateUniqueName); postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); diff --git a/server/src/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java index 74d1ac85b4f..25e79db2bbe 100755 --- a/server/src/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java @@ -151,21 +151,54 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { (!url.toLowerCase().endsWith("qcow2.bz2")) && (!url.toLowerCase().endsWith("qcow2.gz")) && (!url.toLowerCase().endsWith("ova")) && (!url.toLowerCase().endsWith("ova.zip")) && 
(!url.toLowerCase().endsWith("ova.bz2")) && (!url.toLowerCase().endsWith("ova.gz")) && (!url.toLowerCase().endsWith("tar")) && (!url.toLowerCase().endsWith("tar.zip")) && (!url.toLowerCase().endsWith("tar.bz2")) && - (!url.toLowerCase().endsWith("tar.gz")) && (!url.toLowerCase().endsWith("img")) && (!url.toLowerCase().endsWith("raw"))) { + (!url.toLowerCase().endsWith("tar.gz")) && (!url.toLowerCase().endsWith("vmdk")) && (!url.toLowerCase().endsWith("vmdk.gz")) && + (!url.toLowerCase().endsWith("vmdk.zip")) && (!url.toLowerCase().endsWith("vmdk.bz2")) && (!url.toLowerCase().endsWith("img")) && + (!url.toLowerCase().endsWith("img.gz")) && (!url.toLowerCase().endsWith("img.zip")) && (!url.toLowerCase().endsWith("img.bz2")) && + (!url.toLowerCase().endsWith("raw")) && (!url.toLowerCase().endsWith("raw.gz")) && (!url.toLowerCase().endsWith("raw.bz2")) && + (!url.toLowerCase().endsWith("raw.zip"))) { throw new InvalidParameterValueException("Please specify a valid " + format.toLowerCase()); } - if ((format.equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith("vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase() - .endsWith("vhd.gz"))) || - (format.equalsIgnoreCase("vhdx") && (!url.toLowerCase().endsWith("vhdx") && !url.toLowerCase().endsWith("vhdx.zip") && - !url.toLowerCase().endsWith("vhdx.bz2") && !url.toLowerCase() - .endsWith("vhdx.gz"))) || - (format.equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith("qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && - !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz"))) || - (format.equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith("ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase() - .endsWith("ova.gz"))) || - (format.equalsIgnoreCase("tar") && (!url.toLowerCase().endsWith("tar") && !url.toLowerCase().endsWith("tar.zip") && 
!url.toLowerCase().endsWith("tar.bz2") && !url.toLowerCase() - .endsWith("tar.gz"))) || (format.equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith("img") && !url.toLowerCase().endsWith("raw")))) { + if ((format.equalsIgnoreCase("vhd") + && (!url.toLowerCase().endsWith("vhd") + && !url.toLowerCase().endsWith("vhd.zip") + && !url.toLowerCase().endsWith("vhd.bz2") + && !url.toLowerCase().endsWith("vhd.gz"))) + || (format.equalsIgnoreCase("vhdx") + && (!url.toLowerCase().endsWith("vhdx") + && !url.toLowerCase().endsWith("vhdx.zip") + && !url.toLowerCase().endsWith("vhdx.bz2") + && !url.toLowerCase().endsWith("vhdx.gz"))) + || (format.equalsIgnoreCase("qcow2") + && (!url.toLowerCase().endsWith("qcow2") + && !url.toLowerCase().endsWith("qcow2.zip") + && !url.toLowerCase().endsWith("qcow2.bz2") + && !url.toLowerCase().endsWith("qcow2.gz"))) + || (format.equalsIgnoreCase("ova") + && (!url.toLowerCase().endsWith("ova") + && !url.toLowerCase().endsWith("ova.zip") + && !url.toLowerCase().endsWith("ova.bz2") + && !url.toLowerCase().endsWith("ova.gz"))) + || (format.equalsIgnoreCase("tar") + && (!url.toLowerCase().endsWith("tar") + && !url.toLowerCase().endsWith("tar.zip") + && !url.toLowerCase().endsWith("tar.bz2") + && !url.toLowerCase().endsWith("tar.gz"))) + || (format.equalsIgnoreCase("raw") + && (!url.toLowerCase().endsWith("img") + && !url.toLowerCase().endsWith("img.zip") + && !url.toLowerCase().endsWith("img.bz2") + && !url.toLowerCase().endsWith("img.gz") + && !url.toLowerCase().endsWith("raw") + && !url.toLowerCase().endsWith("raw.bz2") + && !url.toLowerCase().endsWith("raw.zip") + && !url.toLowerCase().endsWith("raw.gz"))) + || (format.equalsIgnoreCase("vmdk") + && (!url.toLowerCase().endsWith("vmdk") + && !url.toLowerCase().endsWith("vmdk.zip") + && !url.toLowerCase().endsWith("vmdk.bz2") + && !url.toLowerCase().endsWith("vmdk.gz"))) + ) { throw new InvalidParameterValueException("Please specify a valid URL. 
URL:" + url + " is an invalid for the format " + format.toLowerCase()); } diff --git a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 7c1b9c8aa95..00c1aac77d7 100755 --- a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -125,6 +125,7 @@ import com.cloud.storage.template.RawImageProcessor; import com.cloud.storage.template.TemplateLocation; import com.cloud.storage.template.TemplateProp; import com.cloud.storage.template.VhdProcessor; +import com.cloud.storage.template.OVAProcessor; import com.cloud.storage.template.VmdkProcessor; import com.cloud.utils.NumbersUtil; import com.cloud.utils.S3Utils; @@ -771,6 +772,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S if (ext != null) { if (ext.equalsIgnoreCase("vhd")) { return ImageFormat.VHD; + } else if (ext.equalsIgnoreCase("vhdx")) { + return ImageFormat.VHDX; } else if (ext.equalsIgnoreCase("qcow2")) { return ImageFormat.QCOW2; } else if (ext.equalsIgnoreCase("ova")) { @@ -779,6 +782,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S return ImageFormat.TAR; } else if (ext.equalsIgnoreCase("img") || ext.equalsIgnoreCase("raw")) { return ImageFormat.RAW; + } else if (ext.equalsIgnoreCase("vmdk")) { + return ImageFormat.VMDK; + } else if (ext.equalsIgnoreCase("vdi")) { + return ImageFormat.VDI; } } @@ -794,11 +801,13 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } else if (format == ImageFormat.QCOW2) { processor = new QCOW2Processor(); } else if (format == ImageFormat.OVA) { - processor = new VmdkProcessor(); + processor = new OVAProcessor(); } else if (format == ImageFormat.VHD) { 
processor = new VhdProcessor(); } else if (format == ImageFormat.RAW) { processor = new RawImageProcessor(); + } else if (format == ImageFormat.VMDK) { + processor = new VmdkProcessor(); } if (processor == null) { @@ -840,7 +849,10 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S if (!srcFile.exists()) { srcFile = _storage.getFile(templatePath + ".ova"); if (!srcFile.exists()) { - return new CopyCmdAnswer("Can't find src file:" + templatePath); + srcFile = _storage.getFile(templatePath + ".vmdk"); + if (!srcFile.exists()) { + return new CopyCmdAnswer("Can't find src file:" + templatePath); + } } } } diff --git a/services/secondary-storage/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java b/services/secondary-storage/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java index c01537ce230..84daf27de7c 100755 --- a/services/secondary-storage/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java +++ b/services/secondary-storage/src/org/apache/cloudstack/storage/template/DownloadManagerImpl.java @@ -75,6 +75,7 @@ import com.cloud.storage.template.TemplateDownloader.Status; import com.cloud.storage.template.TemplateLocation; import com.cloud.storage.template.TemplateProp; import com.cloud.storage.template.VhdProcessor; +import com.cloud.storage.template.OVAProcessor; import com.cloud.storage.template.VmdkProcessor; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ManagerBase; @@ -840,8 +841,8 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager if ((tInfo.getSize() == tInfo.getPhysicalSize()) && (tInfo.getInstallPath().endsWith(ImageFormat.OVA.getFileExtension()))) { try { - Processor processor = _processors.get("VMDK Processor"); - VmdkProcessor vmdkProcessor = (VmdkProcessor)processor; + Processor processor = _processors.get("OVA Processor"); + OVAProcessor vmdkProcessor = (OVAProcessor)processor; long vSize = 
vmdkProcessor.getTemplateVirtualSize(path, tInfo.getInstallPath().substring(tInfo.getInstallPath().lastIndexOf(File.separator) + 1)); tInfo.setSize(vSize); loc.updateVirtualSize(vSize); @@ -897,8 +898,8 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager if ((vInfo.getSize() == vInfo.getPhysicalSize()) && (vInfo.getInstallPath().endsWith(ImageFormat.OVA.getFileExtension()))) { try { - Processor processor = _processors.get("VMDK Processor"); - VmdkProcessor vmdkProcessor = (VmdkProcessor)processor; + Processor processor = _processors.get("OVA Processor"); + OVAProcessor vmdkProcessor = (OVAProcessor)processor; long vSize = vmdkProcessor.getTemplateVirtualSize(path, vInfo.getInstallPath().substring(vInfo.getInstallPath().lastIndexOf(File.separator) + 1)); vInfo.setSize(vSize); loc.updateVirtualSize(vSize); @@ -1052,6 +1053,10 @@ public class DownloadManagerImpl extends ManagerBase implements DownloadManager processor.configure("QCOW2 Processor", params); _processors.put("QCOW2 Processor", processor); + processor = new OVAProcessor(); + processor.configure("OVA Processor", params); + _processors.put("OVA Processor", processor); + processor = new VmdkProcessor(); processor.configure("VMDK Processor", params); _processors.put("VMDK Processor", processor); diff --git a/ui/scripts/templates.js b/ui/scripts/templates.js index bcceb89cf85..3854db77759 100644 --- a/ui/scripts/templates.js +++ b/ui/scripts/templates.js @@ -361,6 +361,18 @@ id: 'QCOW2', description: 'QCOW2' }); + items.push({ + id: 'RAW', + description: 'RAW' + }); + items.push({ + id: 'VHD', + description: 'VHD' + }); + items.push({ + id: 'VMDK', + description: 'VMDK' + }); } else if (args.hypervisor == "BareMetal") { //formatSelect.append(""); items.push({ From a71915c034235928b2dd7d0593f0614b74084625 Mon Sep 17 00:00:00 2001 From: Jessica Wang Date: Tue, 17 Dec 2013 14:16:08 -0800 Subject: [PATCH 021/312] CLOUDSTACK-5486: UI > tags > listXXXXXXX API now returns tags property. 
So, use tags property in embedded object returned by listXXXXXXX API to populate tags in detailView in all pages (instead of calling extra API listTags). --- ui/scripts/sharedFunctions.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ui/scripts/sharedFunctions.js b/ui/scripts/sharedFunctions.js index 2425c5b6510..66eaf7ad0f4 100644 --- a/ui/scripts/sharedFunctions.js +++ b/ui/scripts/sharedFunctions.js @@ -2066,6 +2066,11 @@ cloudStack.api = { } }, dataProvider: function(args) { + args.response.success({ + data: args.jsonObj.tags + }); + + /* var resourceId = args.context[contextId][0].id; var data = { resourceId: resourceId, @@ -2096,6 +2101,7 @@ cloudStack.api = { args.response.error(parseXMLHttpResponse(json)); } }); + */ } }; } From 78e4dd8566b7a088785e7c16bc1395000530e32a Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Tue, 17 Dec 2013 12:22:19 -0800 Subject: [PATCH 022/312] CLOUDSTACK-4852: Since upgrade to 4.2 only users at the zone-attached domain level can manipulate VMs Changes: - The vmprofile owner passed in to the planner should be the VM's account and not the caller - Do not do the access check for Root Admin Conflicts: server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java --- .../src/com/cloud/vm/VirtualMachineManagerImpl.java | 3 ++- server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index 21b873bd922..fce60d211db 100755 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -850,7 +850,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, account, params); + Account owner = 
_entityMgr.findById(Account.class, vm.getAccountId()); + VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params); DeployDestination dest = null; try { dest = _dpMgr.planDeployment(vmProfile, plan, avoids); diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java index d2699eb3693..ca2ff42e455 100644 --- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -474,7 +474,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy // check if zone is dedicated. if yes check if vm owner has acess to it. DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(dc.getId()); - if (dedicatedZone != null) { + if (dedicatedZone != null && !_accountMgr.isRootAdmin(vmProfile.getOwner().getType())) { long accountDomainId = vmProfile.getOwner().getDomainId(); long accountId = vmProfile.getOwner().getAccountId(); From 6d6d11f7a498f3dbf1d0e199d20fb6de6ea065ee Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Tue, 17 Dec 2013 14:55:29 -0800 Subject: [PATCH 023/312] CLOUDSTACK-4852: Since upgrade to 4.2 only users at the zone-attached domain level can manipulate VMs Changes: - Removing the trailing spaces in the file --- .../src/com/cloud/vm/VirtualMachineManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index fce60d211db..75ab47b35d2 100755 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -850,7 +850,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - Account owner = _entityMgr.findById(Account.class, vm.getAccountId()); + 
Account owner = _entityMgr.findById(Account.class, vm.getAccountId()); VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, offering, owner, params); DeployDestination dest = null; try { From f4ee1a8115cd927ce15feb9af60b18d108dcd6e7 Mon Sep 17 00:00:00 2001 From: Alex Huang Date: Tue, 17 Dec 2013 14:18:40 -0800 Subject: [PATCH 024/312] reverted 3a3fec3cb6bb4f9a008370ea02279d286654b01a because it fails a unit test --- .../cloud/network/vpc/NetworkACLServiceImpl.java | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java index a95ef1a05fd..ade83e385b6 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -23,8 +23,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import com.cloud.network.vpc.dao.VpcDao; -import org.apache.cloudstack.api.command.user.network.ListNetworkACLListsCmd; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -32,6 +30,7 @@ import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.user.network.CreateNetworkACLCmd; +import org.apache.cloudstack.api.command.user.network.ListNetworkACLListsCmd; import org.apache.cloudstack.api.command.user.network.ListNetworkACLsCmd; import org.apache.cloudstack.context.CallContext; @@ -43,6 +42,7 @@ import com.cloud.network.Networks; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.vpc.dao.NetworkACLDao; +import com.cloud.network.vpc.dao.VpcDao; import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.projects.Project.ListProjectResourcesCriteria; 
import com.cloud.server.ResourceTag.ResourceObjectType; @@ -579,16 +579,8 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ @Override public boolean revokeNetworkACLItem(long ruleId) { NetworkACLItemVO aclItem = _networkACLItemDao.findById(ruleId); - if(aclItem != null){ - NetworkACL acl = _networkAclMgr.getNetworkACL(aclItem.getAclId()); - - Vpc vpc = _entityMgr.findById(Vpc.class, acl.getVpcId()); - - Account caller = CallContext.current().getCallingAccount(); - - _accountMgr.checkAccess(caller, null, true, vpc); - - if((aclItem.getAclId() == NetworkACL.DEFAULT_ALLOW) || (aclItem.getAclId() == NetworkACL.DEFAULT_DENY)){ + if (aclItem != null) { + if ((aclItem.getAclId() == NetworkACL.DEFAULT_ALLOW) || (aclItem.getAclId() == NetworkACL.DEFAULT_DENY)) { throw new InvalidParameterValueException("ACL Items in default ACL cannot be deleted"); } } From c7e2914bcd3cfb52c59d9037bac61398530e4f9e Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Tue, 17 Dec 2013 15:41:11 -0800 Subject: [PATCH 025/312] CLOUDSTACK-5476: Fix missing settings tab on zone details --- ui/css/cloudstack3.css | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index b1811f1ff18..a82298c0ff2 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -1646,11 +1646,11 @@ div.list-view td.state.off span { .ui-tabs li a { float: left; - padding: 15px; + padding: 15px 10px; min-width: 91px; text-align: center; font-size: 11px; - margin-right: 12px; + margin-right: 5px; color: #4E6070; text-decoration: none; /*+placement:shift 0px 2px;*/ @@ -3535,6 +3535,10 @@ div.view table td.editable div.edit input { z-index: 1; } +.detail-view div.view table td.editable div.edit { + width: 116px; +} + div.view table td.editable div.action { float: left; width: 16px; From e54d1dec9dba72d49cd4594c5c67a872e1fd33d2 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 17 Dec 2013 16:04:21 -0800 Subject: 
[PATCH 026/312] CLOUDSTACK-5533: Disable VR's DNS functionality if user choose to use external dns Also fix the regression that external dns won't be used if "dns" service in the network offering is unset. --- .../network/router/VirtualNetworkApplianceManagerImpl.java | 2 +- .../patches/debian/config/etc/init.d/cloud-early-config | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index c2902b1c7f8..7f4a2642686 100755 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -2137,7 +2137,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V boolean useExtDns = !dnsProvided; /* For backward compatibility */ - useExtDns = UseExternalDnsServers.valueIn(dc.getId()); + useExtDns = useExtDns || UseExternalDnsServers.valueIn(dc.getId()); if (useExtDns) { buf.append(" useextdns=true"); diff --git a/systemvm/patches/debian/config/etc/init.d/cloud-early-config b/systemvm/patches/debian/config/etc/init.d/cloud-early-config index d4bf0eb3d92..05661fc2a99 100755 --- a/systemvm/patches/debian/config/etc/init.d/cloud-early-config +++ b/systemvm/patches/debian/config/etc/init.d/cloud-early-config @@ -715,6 +715,12 @@ setup_dnsmasq() { then [ $ETH0_IP ] && NS="$INTERNAL_DNS,$NS" [ $ETH0_IP6 ] && NS6="[::],$NS6" + # enable dns + sed -i -e "/^[#]*port=.*$/d" /etc/dnsmasq.conf + else + # disable dns + sed -i -e "/^[#]*port=.*$/d" /etc/dnsmasq.conf + echo "port=0" >> /etc/dnsmasq.conf fi NS=${NS%?} NS6=${NS6%?} From e53081fe26291ade2187b9b57f74467a896fafb1 Mon Sep 17 00:00:00 2001 From: Min Chen Date: Tue, 17 Dec 2013 16:54:37 -0800 Subject: [PATCH 027/312] CLOUDSTACK-5534: MySQL exception raised when searching for users with keyword. 
--- server/src/com/cloud/api/query/QueryManagerImpl.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index 91222760dc1..8ae5159c0be 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -424,8 +424,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { ssc.addOr("email", SearchCriteria.Op.LIKE, "%" + keyword + "%"); ssc.addOr("state", SearchCriteria.Op.LIKE, "%" + keyword + "%"); ssc.addOr("accountName", SearchCriteria.Op.LIKE, "%" + keyword + "%"); - ssc.addOr("type", SearchCriteria.Op.LIKE, "%" + keyword + "%"); - ssc.addOr("accountState", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + ssc.addOr("accountType", SearchCriteria.Op.LIKE, "%" + keyword + "%"); sc.addAnd("username", SearchCriteria.Op.SC, ssc); } From 55237f8b15a1f1efc6c7f2b7252459cfbb40b994 Mon Sep 17 00:00:00 2001 From: Harikrishna Patnala Date: Tue, 17 Dec 2013 13:00:04 +0530 Subject: [PATCH 028/312] CLOUDSTACK-5507: Unable to add XenServer 5.6 host to cloudstack Fixed "ImportError: No module named cloudstack_pluginlib" on Xenserver 5.6 Signed-off-by: Jayapal --- scripts/vm/hypervisor/xenserver/xenserver56/patch | 1 + scripts/vm/hypervisor/xenserver/xenserver56fp1/patch | 1 + 2 files changed, 2 insertions(+) diff --git a/scripts/vm/hypervisor/xenserver/xenserver56/patch b/scripts/vm/hypervisor/xenserver/xenserver56/patch index 50116935789..9473bca8d5c 100644 --- a/scripts/vm/hypervisor/xenserver/xenserver56/patch +++ b/scripts/vm/hypervisor/xenserver/xenserver56/patch @@ -29,6 +29,7 @@ NFSSR.py=/opt/xensource/sm vmops=..,0755,/etc/xapi.d/plugins vmopsSnapshot=..,0755,/etc/xapi.d/plugins +cloudstack_pluginlib.py=..,0755,/etc/xapi.d/plugins hostvmstats.py=..,0755,/opt/xensource/sm systemvm.iso=../../../../../vms,0644,/opt/xensource/packages/iso 
id_rsa.cloud=../../../systemvm,0600,/root/.ssh diff --git a/scripts/vm/hypervisor/xenserver/xenserver56fp1/patch b/scripts/vm/hypervisor/xenserver/xenserver56fp1/patch index 55d538edf4a..c91aa73821a 100644 --- a/scripts/vm/hypervisor/xenserver/xenserver56fp1/patch +++ b/scripts/vm/hypervisor/xenserver/xenserver56fp1/patch @@ -29,6 +29,7 @@ NFSSR.py=/opt/xensource/sm vmops=..,0755,/etc/xapi.d/plugins vmopsSnapshot=..,0755,/etc/xapi.d/plugins +cloudstack_pluginlib.py=..,0755,/etc/xapi.d/plugins hostvmstats.py=..,0755,/opt/xensource/sm systemvm.iso=../../../../../vms,0644,/opt/xensource/packages/iso id_rsa.cloud=../../../systemvm,0600,/root/.ssh From 96309a7867066156a8fec7251680a39a574a95b5 Mon Sep 17 00:00:00 2001 From: Jayapal Date: Wed, 18 Dec 2013 11:42:23 +0530 Subject: [PATCH 029/312] Fixed issue in getting interface number in IpAssocVpcCommand Regression from the below commit commit 494ccd821d711a2957531d1c33274ed293e4d925 Author: ynojima Date: Wed Nov 6 11:02:56 2013 -0700 Bugfix: VR has double NICs connected to Public network --- .../network/router/VpcVirtualNetworkApplianceManagerImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index c3099c7754a..8e8ffa0bab0 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -789,7 +789,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian // should this be a vlan id or a broadcast uri??? 
String vlanTag = BroadcastDomainType.getValue(network.getBroadcastUri()); String netmask = NetUtils.getCidrNetmask(network.getCidr()); - PrivateIpAddress ip = new PrivateIpAddress(ipVO, vlanTag, network.getGateway(), netmask, guestNic.getMacAddress()); + PrivateIpAddress ip = new PrivateIpAddress(ipVO, network.getBroadcastUri().toString(), network.getGateway(), netmask, guestNic.getMacAddress()); List privateIps = new ArrayList(1); privateIps.add(ip); @@ -932,7 +932,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian // or maybe conditional; in case of vlan ... in case of lswitch String vlanTag = BroadcastDomainType.getValue(network.getBroadcastUri()); String netmask = NetUtils.getCidrNetmask(network.getCidr()); - PrivateIpAddress ip = new PrivateIpAddress(ipVO, vlanTag, network.getGateway(), netmask, privateNic.getMacAddress()); + PrivateIpAddress ip = new PrivateIpAddress(ipVO, network.getBroadcastUri().toString(), network.getGateway(), netmask, privateNic.getMacAddress()); List privateIps = new ArrayList(1); privateIps.add(ip); From d810ab46feeea233cb4fdfdd3e342a598cbd609b Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Wed, 18 Dec 2013 14:21:07 +0530 Subject: [PATCH 030/312] Revert "reverted 3a3fec3cb6bb4f9a008370ea02279d286654b01a because it fails a unit test" This reverts commit f4ee1a8115cd927ce15feb9af60b18d108dcd6e7. 
--- .../cloud/network/vpc/NetworkACLServiceImpl.java | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java index ade83e385b6..a95ef1a05fd 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -23,6 +23,8 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; +import com.cloud.network.vpc.dao.VpcDao; +import org.apache.cloudstack.api.command.user.network.ListNetworkACLListsCmd; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -30,7 +32,6 @@ import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.user.network.CreateNetworkACLCmd; -import org.apache.cloudstack.api.command.user.network.ListNetworkACLListsCmd; import org.apache.cloudstack.api.command.user.network.ListNetworkACLsCmd; import org.apache.cloudstack.context.CallContext; @@ -42,7 +43,6 @@ import com.cloud.network.Networks; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.vpc.dao.NetworkACLDao; -import com.cloud.network.vpc.dao.VpcDao; import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.server.ResourceTag.ResourceObjectType; @@ -579,8 +579,16 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ @Override public boolean revokeNetworkACLItem(long ruleId) { NetworkACLItemVO aclItem = _networkACLItemDao.findById(ruleId); - if (aclItem != null) { - if ((aclItem.getAclId() == NetworkACL.DEFAULT_ALLOW) || (aclItem.getAclId() == NetworkACL.DEFAULT_DENY)) { + if(aclItem != null){ + 
NetworkACL acl = _networkAclMgr.getNetworkACL(aclItem.getAclId()); + + Vpc vpc = _entityMgr.findById(Vpc.class, acl.getVpcId()); + + Account caller = CallContext.current().getCallingAccount(); + + _accountMgr.checkAccess(caller, null, true, vpc); + + if((aclItem.getAclId() == NetworkACL.DEFAULT_ALLOW) || (aclItem.getAclId() == NetworkACL.DEFAULT_DENY)){ throw new InvalidParameterValueException("ACL Items in default ACL cannot be deleted"); } } From 961834661cbd3606623e399f4985ea6e86101adc Mon Sep 17 00:00:00 2001 From: Jayapal Date: Wed, 18 Dec 2013 14:49:47 +0530 Subject: [PATCH 031/312] CLOUDSTACK-5466:Fixed freeing ip address in removeIpFromNic --- server/src/com/cloud/network/NetworkServiceImpl.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index b7ffe269533..0e3aae9f538 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -756,12 +756,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // Verify input parameters NicSecondaryIpVO secIpVO = _nicSecondaryIpDao.findById(ipAddressId); if (secIpVO == null) { - throw new InvalidParameterValueException("Unable to find ip address by id"); + throw new InvalidParameterValueException("Unable to find secondary ip address by id"); } VirtualMachine vm = _userVmDao.findById(secIpVO.getVmId()); if (vm == null) { - throw new InvalidParameterValueException("There is no vm with the nic"); + throw new InvalidParameterValueException("There is no vm with the given secondary ip"); } // verify permissions _accountMgr.checkAccess(caller, null, true, vm); @@ -790,7 +790,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Invalid zone Id is given"); } - s_logger.debug("Calling the ip allocation ..."); + 
s_logger.debug("Calling secondary ip "+ secIpVO.getIp4Address() + " release "); if (dc.getNetworkType() == NetworkType.Advanced && network.getGuestType() == Network.GuestType.Isolated) { //check PF or static NAT is configured on this ip address String secondaryIp = secIpVO.getIp4Address(); @@ -812,7 +812,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { publicIpVO.getId()); } } else if (dc.getNetworkType() == NetworkType.Basic || ntwkOff.getGuestType() == Network.GuestType.Shared) { - final IPAddressVO ip = _ipAddressDao.findByIpAndNetworkId(secIpVO.getNetworkId(), secIpVO.getIp4Address()); + final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(secIpVO.getNetworkId(), secIpVO.getIp4Address()); if (ip != null) { Transaction.execute(new TransactionCallbackNoReturn() { @Override From 55a6df450153063a25f05d24f79587371f2082b1 Mon Sep 17 00:00:00 2001 From: Antonio Fornie Date: Mon, 16 Dec 2013 16:43:34 +0100 Subject: [PATCH 032/312] Resources leaks, refactoring and testing Removing resource leaks from UsageSanityChecker and refactoring it (encapsulation, removal of copy and paste, constants...) 
Modularize static method for closing Statments in TransactionLegacy and reusing this new method from other classes (Upgrade2214to30) Create Unit and Integration Tests for UsageSanityChecker Add DBUnit cases and integration profile for nitegration tests as a base for future DB tests --- .../cloud/upgrade/dao/Upgrade2214to30.java | 40 +- .../com/cloud/utils/db/TransactionLegacy.java | 24 +- usage/pom.xml | 26 ++ .../com/cloud/usage/UsageSanityChecker.java | 362 ++++++++++-------- .../com/cloud/usage/UsageSanityCheckerIT.java | 144 +++++++ .../cloud/usage/UsageSanityCheckerTest.java | 52 +++ usage/test/resources/cloud1.xml | 15 + usage/test/resources/cloud2.xml | 15 + usage/test/resources/cloud3.xml | 4 + usage/test/resources/cloud_usage1.xml | 17 + usage/test/resources/cloud_usage2.xml | 34 ++ usage/test/resources/cloud_usage3.xml | 4 + 12 files changed, 558 insertions(+), 179 deletions(-) create mode 100644 usage/test/com/cloud/usage/UsageSanityCheckerIT.java create mode 100644 usage/test/com/cloud/usage/UsageSanityCheckerTest.java create mode 100644 usage/test/resources/cloud1.xml create mode 100644 usage/test/resources/cloud2.xml create mode 100644 usage/test/resources/cloud3.xml create mode 100644 usage/test/resources/cloud_usage1.xml create mode 100644 usage/test/resources/cloud_usage2.xml create mode 100644 usage/test/resources/cloud_usage3.xml diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java index 62fdcf10dd2..58dd916ea6e 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java @@ -34,6 +34,7 @@ import org.apache.log4j.Logger; import com.cloud.offering.NetworkOffering; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.crypt.EncryptionSecretKeyChecker; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import 
com.cloud.utils.script.Script; @@ -110,19 +111,6 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { return new File[] {new File(script)}; } - protected void closePstmts(List pstmt2Close){ - for(PreparedStatement pstmt : pstmt2Close) { - try { - if (pstmt != null && !pstmt.isClosed()) { - pstmt.close(); - } - } catch (SQLException e) { - // It's not possible to recover from this and we need to continue closing - e.printStackTrace(); - } - } - } - private void setupPhysicalNetworks(Connection conn) { /** * for each zone: @@ -374,7 +362,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { } catch (SQLException e) { throw new CloudRuntimeException("Exception while adding PhysicalNetworks", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } } @@ -454,7 +442,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt host_details values ", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } s_logger.debug("Done encrypting host details"); } @@ -500,7 +488,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt vm_instance vnc_password ", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } s_logger.debug("Done encrypting vm_instance vnc_password"); } @@ -533,7 +521,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt user secret key ", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } s_logger.debug("Done encrypting user keys"); } @@ -566,7 +554,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade 
{ } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt vpn_users password ", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } s_logger.debug("Done encrypting vpn_users password"); } @@ -687,7 +675,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { } catch (SQLException e) { throw new CloudRuntimeException("Unable to create service/provider map for network offerings", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } } @@ -725,7 +713,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { } catch (SQLException e) { throw new CloudRuntimeException("Unable to update domain network ref", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } } @@ -759,7 +747,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { } catch (SQLException e) { throw new CloudRuntimeException("Unable to create service/provider map for networks", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } } @@ -873,7 +861,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { pstmt.close(); } catch (SQLException e) { } - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } } @@ -904,7 +892,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { } catch (SQLException e) { throw new CloudRuntimeException("Unable to update op_host_capacity table. 
", e); } finally { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } } @@ -1011,7 +999,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { pstmt.close(); } catch (SQLException e) { } - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } } @@ -1084,7 +1072,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { zoneIds.add(rs.getLong(1)); } } catch (SQLException e) { - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); throw new CloudRuntimeException("Unable to switch networks to the new network offering", e); } @@ -1167,7 +1155,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { pstmt.close(); } catch (SQLException e) { } - closePstmts(pstmt2Close); + TransactionLegacy.closePstmts(pstmt2Close); } } diff --git a/framework/db/src/com/cloud/utils/db/TransactionLegacy.java b/framework/db/src/com/cloud/utils/db/TransactionLegacy.java index b36c02746a6..ac0ea21d1f1 100755 --- a/framework/db/src/com/cloud/utils/db/TransactionLegacy.java +++ b/framework/db/src/com/cloud/utils/db/TransactionLegacy.java @@ -769,7 +769,7 @@ public class TransactionLegacy { try { // we should only close db connection when it is not user managed - if (this._dbId != CONNECTED_DB) { + if (_dbId != CONNECTED_DB) { if (s_connLogger.isTraceEnabled()) { s_connLogger.trace("Closing DB connection: dbconn" + System.identityHashCode(_conn)); } @@ -1212,6 +1212,26 @@ public class TransactionLegacy { * @param conn */ protected void setConnection(Connection conn) { - this._conn = conn; + _conn = conn; } + + /** + * Receives a list of {@link PreparedStatement} and quietly closes all of them, which + * triggers also closing their dependent objects, like a {@link ResultSet} + * + * @param pstmt2Close + */ + public static void closePstmts(List pstmt2Close) { + for (PreparedStatement pstmt : pstmt2Close) { + try { + if (pstmt != null && !pstmt.isClosed()) { + 
pstmt.close(); + } + } catch (SQLException e) { + // It's not possible to recover from this and we need to continue closing + e.printStackTrace(); + } + } + } + } diff --git a/usage/pom.xml b/usage/pom.xml index a0055ccc12c..ecd765eabaa 100644 --- a/usage/pom.xml +++ b/usage/pom.xml @@ -44,6 +44,13 @@ commons-daemon commons-daemon + + + org.dbunit + dbunit + 2.4.9 + test + @@ -213,5 +220,24 @@ + + integration + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + integration-test + verify + + + + + + + diff --git a/usage/src/com/cloud/usage/UsageSanityChecker.java b/usage/src/com/cloud/usage/UsageSanityChecker.java index 34032bcbd74..5e6123bc777 100644 --- a/usage/src/com/cloud/usage/UsageSanityChecker.java +++ b/usage/src/com/cloud/usage/UsageSanityChecker.java @@ -25,191 +25,182 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.log4j.Logger; import com.cloud.utils.db.TransactionLegacy; +/** + * This class must not be used concurrently because its state changes often during + * execution in a non synchronized way + */ public class UsageSanityChecker { - private StringBuffer errors; - private String lastCheckId = ""; - private final String lastCheckFile = "/usr/local/libexec/sanity-check-last-id"; + protected static final Logger s_logger = Logger.getLogger(UsageSanityChecker.class); + protected static final int DEFAULT_AGGREGATION_RANGE = 1440; + protected StringBuilder errors; + protected List checkCases; + protected String lastCheckFile = "/usr/local/libexec/sanity-check-last-id"; + protected String lastCheckId = ""; + protected int lastId = -1; + protected int maxId = -1; + protected Connection conn; - private boolean checkMaxUsage(Connection conn) throws SQLException { + protected void reset() { + errors = new StringBuilder(); + checkCases = new ArrayList(); + } - PreparedStatement pstmt = 
conn.prepareStatement("SELECT value FROM `cloud`.`configuration` where name = 'usage.stats.job.aggregation.range'"); - ResultSet rs = pstmt.executeQuery(); + protected boolean checkItemCountByPstmt() throws SQLException { + boolean checkOk = true; - int aggregationRange = 1440; - if (rs.next()) { - aggregationRange = rs.getInt(1); - } else { - System.out.println("Failed to retrieve aggregation range. Using default : " + aggregationRange); + for(CheckCase check : checkCases) { + checkOk &= checkItemCountByPstmt(check); + } + + return checkOk; + } + + protected boolean checkItemCountByPstmt(CheckCase checkCase) throws SQLException { + List pstmt2Close = new ArrayList(); + boolean checkOk = true; + + /* + * Check for item usage records which are created after it is removed + */ + PreparedStatement pstmt; + try { + pstmt = conn.prepareStatement(checkCase.sqlTemplate); + if(checkCase.checkId) { + pstmt.setInt(1, lastId); + pstmt.setInt(2, maxId); + } + + pstmt2Close.add(pstmt); + ResultSet rs = pstmt.executeQuery(); + if (rs.next() && (rs.getInt(1) > 0)) { + errors.append(String.format("Error: Found %s %s\n", rs.getInt(1), checkCase.itemName)); + checkOk = false; + } + } catch (SQLException e) { + throw e; + } finally { + TransactionLegacy.closePstmts(pstmt2Close); + } + return checkOk; + } + + protected void checkMaxUsage() throws SQLException { + int aggregationRange = DEFAULT_AGGREGATION_RANGE; + List pstmt2Close = new ArrayList(); + try { + PreparedStatement pstmt = conn.prepareStatement( + "SELECT value FROM `cloud`.`configuration` where name = 'usage.stats.job.aggregation.range'"); + pstmt2Close.add(pstmt); + ResultSet rs = pstmt.executeQuery(); + + if (rs.next()) { + aggregationRange = rs.getInt(1); + } else { + s_logger.debug("Failed to retrieve aggregation range. 
Using default : " + aggregationRange); + } + } catch (SQLException e) { + throw e; + } finally { + TransactionLegacy.closePstmts(pstmt2Close); } int aggregationHours = aggregationRange / 60; - /* - * Check for usage records with raw_usage > aggregationHours - */ - pstmt = - conn.prepareStatement("SELECT count(*) FROM `cloud_usage`.`cloud_usage` cu where usage_type not in (4,5) and raw_usage > " + aggregationHours + lastCheckId); - rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " usage records with raw_usage > " + aggregationHours); - errors.append("\n"); - return false; - } - return true; + addCheckCase("SELECT count(*) FROM `cloud_usage`.`cloud_usage` cu where usage_type not in (4,5) and raw_usage > " + + aggregationHours, + "usage records with raw_usage > " + aggregationHours, + lastCheckId); } - private boolean checkVmUsage(Connection conn) throws SQLException { - boolean success = true; - /* - * Check for Vm usage records which are created after the vm is destroyed - */ - PreparedStatement pstmt = - conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.vm_instance vm where vm.type = 'User' " + - "and cu.usage_type in (1 , 2) and cu.usage_id = vm.id and cu.start_date > vm.removed" + lastCheckId); - ResultSet rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " Vm usage records which are created after Vm is destroyed"); - errors.append("\n"); - success = false; - } + protected void checkVmUsage() { + addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.vm_instance vm " + + "where vm.type = 'User' and cu.usage_type in (1 , 2) " + + "and cu.usage_id = vm.id and cu.start_date > vm.removed ", + "Vm usage records which are created after Vm is destroyed", + lastCheckId); - /* - * Check for Vms which have multiple running vm records in helper table - */ - pstmt = - 
conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance where usage_type =1 " - + "and end_date is null group by vm_instance_id having count(vm_instance_id) > 1) c ;"); - rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " duplicate running Vm entries in vm usage helper table"); - errors.append("\n"); - success = false; - } + addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance " + + "where usage_type =1 and end_date is null group by vm_instance_id " + + "having count(vm_instance_id) > 1) c ;", + "duplicate running Vm entries in vm usage helper table"); - /* - * Check for Vms which have multiple allocated vm records in helper table - */ - pstmt = - conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance where usage_type =2 " - + "and end_date is null group by vm_instance_id having count(vm_instance_id) > 1) c ;"); - rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " duplicate allocated Vm entries in vm usage helper table"); - errors.append("\n"); - success = false; - } + addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance " + + "where usage_type =2 and end_date is null group by vm_instance_id " + + "having count(vm_instance_id) > 1) c ;", + "duplicate allocated Vm entries in vm usage helper table"); - /* - * Check for Vms which have running vm entry without allocated vm entry in helper table - */ - pstmt = - conn.prepareStatement("select count(vm_instance_id) from cloud_usage.usage_vm_instance o where o.end_date is null and o.usage_type=1 and not exists " - + "(select 1 from cloud_usage.usage_vm_instance i where i.vm_instance_id=o.vm_instance_id and usage_type=2 and i.end_date is null)"); - rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - 
errors.append("Error: Found " + rs.getInt(1) + " running Vm entries without corresponding allocated entries in vm usage helper table"); - errors.append("\n"); - success = false; - } - return success; + addCheckCase("select count(vm_instance_id) from cloud_usage.usage_vm_instance o " + + "where o.end_date is null and o.usage_type=1 and not exists " + + "(select 1 from cloud_usage.usage_vm_instance i where " + + "i.vm_instance_id=o.vm_instance_id and usage_type=2 and i.end_date is null)", + "running Vm entries without corresponding allocated entries in vm usage helper table"); } - private boolean checkVolumeUsage(Connection conn) throws SQLException { - boolean success = true; - /* - * Check for Volume usage records which are created after the volume is removed - */ - PreparedStatement pstmt = - conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.volumes v " + - "where cu.usage_type = 6 and cu.usage_id = v.id and cu.start_date > v.removed" + lastCheckId); - ResultSet rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " volume usage records which are created after volume is removed"); - errors.append("\n"); - success = false; - } + protected void checkVolumeUsage() { + addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.volumes v where " + + "cu.usage_type = 6 and cu.usage_id = v.id and cu.start_date > v.removed ", + "volume usage records which are created after volume is removed", + lastCheckId); - /* - * Check for duplicate records in volume usage helper table - */ - pstmt = - conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_volume " - + "where deleted is null group by id having count(id) > 1) c;"); - rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " duplicate records is volume usage helper table"); - errors.append("\n"); - 
success = false; - } - return success; + addCheckCase("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_volume " + + "where deleted is null group by id having count(id) > 1) c;", + "duplicate records in volume usage helper table"); } - private boolean checkTemplateISOUsage(Connection conn) throws SQLException { - /* - * Check for Template/ISO usage records which are created after it is removed - */ - PreparedStatement pstmt = - conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.template_zone_ref tzr " + - "where cu.usage_id = tzr.template_id and cu.zone_id = tzr.zone_id and cu.usage_type in (7,8) and cu.start_date > tzr.removed" + lastCheckId); - ResultSet rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " template/ISO usage records which are created after it is removed"); - errors.append("\n"); - return false; - } - return true; + protected void checkTemplateISOUsage() { + addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.template_zone_ref tzr where " + + "cu.usage_id = tzr.template_id and cu.zone_id = tzr.zone_id and cu.usage_type in (7,8) and cu.start_date > tzr.removed ", + "template/ISO usage records which are created after it is removed", + lastCheckId); } - private boolean checkSnapshotUsage(Connection conn) throws SQLException { - /* - * Check for snapshot usage records which are created after snapshot is removed - */ - PreparedStatement pstmt = - conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.snapshots s " + - "where cu.usage_id = s.id and cu.usage_type = 9 and cu.start_date > s.removed" + lastCheckId); - ResultSet rs = pstmt.executeQuery(); - if (rs.next() && (rs.getInt(1) > 0)) { - errors.append("Error: Found " + rs.getInt(1) + " snapshot usage records which are created after snapshot is removed"); - errors.append("\n"); - return false; - } - return true; + 
protected void checkSnapshotUsage() { + addCheckCase("select count(*) from cloud_usage.cloud_usage cu inner join cloud.snapshots s where " + + "cu.usage_id = s.id and cu.usage_type = 9 and cu.start_date > s.removed ", + "snapshot usage records which are created after it is removed", + lastCheckId); } - public String runSanityCheck() throws SQLException { + protected void readLastCheckId(){ + BufferedReader reader = null; try { - BufferedReader reader = new BufferedReader(new FileReader(lastCheckFile)); - String last_id = null; - if ((reader != null) && (last_id = reader.readLine()) != null) { - int lastId = Integer.parseInt(last_id); - if (lastId > 0) { - lastCheckId = " and cu.id > " + last_id; - } + reader = new BufferedReader(new FileReader(lastCheckFile)); + String lastIdText = null; + lastId = -1; + if ((reader != null) && (lastIdText = reader.readLine()) != null) { + lastId = Integer.parseInt(lastIdText); + } + } catch (IOException e) { + s_logger.error(e); + } finally { + try { + reader.close(); + } catch (IOException e) { + s_logger.error(e); } - reader.close(); - } catch (Exception e) { - // Error while reading last check id } + } - Connection conn = TransactionLegacy.getStandaloneConnection(); - int maxId = 0; + protected void readMaxId() throws SQLException { PreparedStatement pstmt = conn.prepareStatement("select max(id) from cloud_usage.cloud_usage"); ResultSet rs = pstmt.executeQuery(); + maxId = -1; if (rs.next() && (rs.getInt(1) > 0)) { maxId = rs.getInt(1); - lastCheckId += " and cu.id <= " + maxId; + lastCheckId += " and cu.id <= ?"; } - errors = new StringBuffer(); - checkMaxUsage(conn); - checkVmUsage(conn); - checkVolumeUsage(conn); - checkTemplateISOUsage(conn); - checkSnapshotUsage(conn); - FileWriter fstream; + } + + protected void updateNewMaxId() { + FileWriter fstream = null; try { fstream = new FileWriter(lastCheckFile); BufferedWriter out = new BufferedWriter(fstream); @@ -217,20 +208,89 @@ public class UsageSanityChecker { out.close(); 
} catch (IOException e) { // Error while writing last check id + } finally { + if (fstream != null) { + try { + fstream.close(); + } catch (IOException e) { + s_logger.error(e); + } + } } + } + + public String runSanityCheck() throws SQLException { + + readLastCheckId(); + if (lastId > 0) { + lastCheckId = " and cu.id > ?"; + } + + conn = getConnection(); + readMaxId(); + + reset(); + + checkMaxUsage(); + checkVmUsage(); + checkVolumeUsage(); + checkTemplateISOUsage(); + checkSnapshotUsage(); + + checkItemCountByPstmt(); + return errors.toString(); } + /** + * Local acquisition of {@link Connection} to remove static cling + * @return + */ + protected Connection getConnection() { + return TransactionLegacy.getStandaloneConnection(); + } + public static void main(String args[]) { UsageSanityChecker usc = new UsageSanityChecker(); String sanityErrors; try { sanityErrors = usc.runSanityCheck(); if (sanityErrors.length() > 0) { - System.out.println(sanityErrors.toString()); + s_logger.error(sanityErrors.toString()); } } catch (SQLException e) { e.printStackTrace(); } } + + protected void addCheckCase(String sqlTemplate, String itemName, String lastCheckId) { + checkCases.add(new CheckCase(sqlTemplate, itemName, lastCheckId)); + } + + protected void addCheckCase(String sqlTemplate, String itemName) { + checkCases.add(new CheckCase(sqlTemplate, itemName)); + } } + + +/** + * Just an abstraction of the kind of check to repeat across these cases + * encapsulating what change for each specific case + */ +class CheckCase { + public String sqlTemplate; + public String itemName; + public boolean checkId = false; + + public CheckCase(String sqlTemplate, String itemName, String lastCheckId) { + checkId = true; + this.sqlTemplate = sqlTemplate + lastCheckId; + this.itemName = itemName; + } + + public CheckCase(String sqlTemplate, String itemName) { + checkId = false; + this.sqlTemplate = sqlTemplate; + this.itemName = itemName; + } +} \ No newline at end of file diff --git 
a/usage/test/com/cloud/usage/UsageSanityCheckerIT.java b/usage/test/com/cloud/usage/UsageSanityCheckerIT.java new file mode 100644 index 00000000000..412bebc6106 --- /dev/null +++ b/usage/test/com/cloud/usage/UsageSanityCheckerIT.java @@ -0,0 +1,144 @@ +package com.cloud.usage; + +import org.dbunit.DatabaseUnitException; +import org.dbunit.dataset.DataSetException; +import org.dbunit.dataset.IDataSet; +import org.dbunit.dataset.xml.FlatXmlDataSetBuilder; +import org.dbunit.ext.mysql.MySqlConnection; +import org.dbunit.operation.DatabaseOperation; + +import static org.junit.Assert.assertEquals; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import org.mockito.Mockito; + +import com.cloud.utils.PropertiesUtil; + +import java.io.FileNotFoundException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Properties; + +@RunWith(Parameterized.class) +public class UsageSanityCheckerIT{ + + protected Connection cloudConn; + + protected Connection usageConn; + + protected MySqlConnection dbuUsageConn; + + protected MySqlConnection dbuCloudConn; + + protected Properties properties = new Properties(); + + protected IDataSet cloudDataSet; + + protected IDataSet usageDataSet; + + protected String cloudDbuFileName; + + protected String usageDbuFileName; + + protected String expectedErrors; + + protected static final String EXPECTED_ERRORS_1 = "Error: Found 2 usage records with raw_usage > 10\n" + + "Error: Found 1 Vm usage records which are created after Vm is destroyed\n" + + "Error: Found 2 duplicate allocated Vm entries in vm usage helper table\n" + + "Error: Found 1 running Vm entries without corresponding allocated entries in vm usage helper table\n" + + "Error: Found 1 volume usage records which are 
created after volume is removed\n" + + "Error: Found 1 template/ISO usage records which are created after it is removed\n" + + "Error: Found 1 snapshot usage records which are created after it is removed\n"; + + protected static final String EXPECTED_ERRORS_2 = "Error: Found 3 usage records with raw_usage > 10\n" + + "Error: Found 1 Vm usage records which are created after Vm is destroyed\n" + + "Error: Found 8 duplicate running Vm entries in vm usage helper table\n" + + "Error: Found 4 duplicate allocated Vm entries in vm usage helper table\n" + + "Error: Found 4 running Vm entries without corresponding allocated entries in vm usage helper table\n" + + "Error: Found 2 volume usage records which are created after volume is removed\n" + + "Error: Found 6 duplicate records in volume usage helper table\n" + + "Error: Found 2 template/ISO usage records which are created after it is removed\n" + + "Error: Found 1 snapshot usage records which are created after it is removed\n"; + + protected static final String EXPECTED_ERRORS_3 = ""; + + + public UsageSanityCheckerIT(String cloudDbuFileName, String usageDbuFileName, + String expectedErrors) { + this.cloudDbuFileName = cloudDbuFileName; + this.usageDbuFileName = usageDbuFileName; + this.expectedErrors = expectedErrors; + } + + @Parameters + public static Collection data() { + Object [][] data = new Object[][] { + {"cloud1.xml", "cloud_usage1.xml", EXPECTED_ERRORS_1}, + {"cloud2.xml", "cloud_usage2.xml", EXPECTED_ERRORS_2}, + {"cloud3.xml", "cloud_usage3.xml", EXPECTED_ERRORS_3} + }; + return Arrays.asList(data); + } + + protected Connection createConnection(String dbSchema) throws SQLException { + String cloudDbUrl = "jdbc:mysql://"+properties.getProperty("db."+dbSchema+".host") + + ":" + properties.getProperty("db."+dbSchema+".port") + "/" + + properties.getProperty("db."+dbSchema+".name"); + return DriverManager.getConnection(cloudDbUrl, properties.getProperty("db."+dbSchema+".username"), + 
properties.getProperty("db."+dbSchema+".password")); + } + + @Before + public void setUp() throws Exception { + PropertiesUtil.loadFromFile(properties, PropertiesUtil.findConfigFile("db.properties")); + + Class.forName("com.mysql.jdbc.Driver"); + cloudConn = createConnection("cloud"); + usageConn = createConnection("usage"); + + dbuCloudConn = new MySqlConnection(cloudConn, properties.getProperty("db.cloud.name")); + dbuUsageConn = new MySqlConnection(usageConn, properties.getProperty("db.usage.name")); + cloudDataSet = getCloudDataSet(); + usageDataSet = getUsageDataSet(); + DatabaseOperation.CLEAN_INSERT.execute(dbuCloudConn, cloudDataSet); + DatabaseOperation.CLEAN_INSERT.execute(dbuUsageConn, usageDataSet); + } + + @After + public void tearDown() throws DataSetException, FileNotFoundException, DatabaseUnitException, SQLException { + DatabaseOperation.DELETE_ALL.execute(dbuCloudConn, getCloudDataSet()); + DatabaseOperation.DELETE_ALL.execute(dbuUsageConn, getUsageDataSet()); + } + + @Test + public void testRunSanityCheck() throws SQLException, ClassNotFoundException, FileNotFoundException, DatabaseUnitException { + // Prepare + UsageSanityChecker checker = Mockito.spy(new UsageSanityChecker()); + Mockito.doReturn(cloudConn).when(checker).getConnection(); + Mockito.doNothing().when(checker).readLastCheckId(); + Mockito.doNothing().when(checker).updateNewMaxId(); + checker.lastId = 2; + + // Execute + String actualErrors = checker.runSanityCheck(); + + // Assert + assertEquals("Expected errors not found", expectedErrors, actualErrors); + } + + protected IDataSet getCloudDataSet() throws DataSetException, FileNotFoundException { + return new FlatXmlDataSetBuilder().build(PropertiesUtil.openStreamFromURL(cloudDbuFileName)); + } + + protected IDataSet getUsageDataSet() throws DataSetException, FileNotFoundException { + return new FlatXmlDataSetBuilder().build(PropertiesUtil.openStreamFromURL(usageDbuFileName)); + } +} diff --git 
a/usage/test/com/cloud/usage/UsageSanityCheckerTest.java b/usage/test/com/cloud/usage/UsageSanityCheckerTest.java new file mode 100644 index 00000000000..346a8ad3cdb --- /dev/null +++ b/usage/test/com/cloud/usage/UsageSanityCheckerTest.java @@ -0,0 +1,52 @@ +package com.cloud.usage; + +import org.junit.Test; +import org.mockito.Mockito; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import junit.framework.TestCase; + +public class UsageSanityCheckerTest extends TestCase { + + @Test + public void testCheckItemCountByPstmt() throws SQLException { + // Prepare + // Mock dependencies to exclude from the test + String sqlTemplate1 = "SELECT * FROM mytable1"; + String sqlTemplate2 = "SELECT * FROM mytable2"; + + Connection conn = Mockito.mock(Connection.class); + PreparedStatement pstmt = Mockito.mock(PreparedStatement.class); + ResultSet rs = Mockito.mock(ResultSet.class); + + Mockito.when(conn.prepareStatement(sqlTemplate1)).thenReturn(pstmt); + Mockito.when(conn.prepareStatement(sqlTemplate2)).thenReturn(pstmt); + Mockito.when(pstmt.executeQuery()).thenReturn(rs, rs); + + // First if: true -> 8 + // Second loop: true -> 16 + Mockito.when(rs.next()).thenReturn(true, true); + Mockito.when(rs.getInt(1)).thenReturn(8, 8, 16, 16); + + // Prepare class under test + UsageSanityChecker checker = new UsageSanityChecker(); + checker.conn = conn; + checker.reset(); + checker.addCheckCase(sqlTemplate1, "item1"); + checker.addCheckCase(sqlTemplate2, "item2"); + + // Execute + checker.checkItemCountByPstmt(); + + // Verify + Pattern pattern = Pattern.compile(".*8.*item1.*\n.*16.*item2.*"); + Matcher matcher = pattern.matcher(checker.errors); + assertTrue("Didn't create complete errors. 
It should create 2 errors: 8 item1 and 16 item2", matcher.find()); + } +} diff --git a/usage/test/resources/cloud1.xml b/usage/test/resources/cloud1.xml new file mode 100644 index 00000000000..e56ed073e01 --- /dev/null +++ b/usage/test/resources/cloud1.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/usage/test/resources/cloud2.xml b/usage/test/resources/cloud2.xml new file mode 100644 index 00000000000..e56ed073e01 --- /dev/null +++ b/usage/test/resources/cloud2.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/usage/test/resources/cloud3.xml b/usage/test/resources/cloud3.xml new file mode 100644 index 00000000000..6e7a7453d06 --- /dev/null +++ b/usage/test/resources/cloud3.xml @@ -0,0 +1,4 @@ + + + + diff --git a/usage/test/resources/cloud_usage1.xml b/usage/test/resources/cloud_usage1.xml new file mode 100644 index 00000000000..aaad17d3322 --- /dev/null +++ b/usage/test/resources/cloud_usage1.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/usage/test/resources/cloud_usage2.xml b/usage/test/resources/cloud_usage2.xml new file mode 100644 index 00000000000..d87d05a8734 --- /dev/null +++ b/usage/test/resources/cloud_usage2.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/usage/test/resources/cloud_usage3.xml b/usage/test/resources/cloud_usage3.xml new file mode 100644 index 00000000000..332ddfc2496 --- /dev/null +++ b/usage/test/resources/cloud_usage3.xml @@ -0,0 +1,4 @@ + + + + From cc27a740dc43bff0796666f668964fc4f4badd78 Mon Sep 17 00:00:00 2001 From: Kishan Kavala Date: Wed, 18 Dec 2013 18:22:07 +0530 Subject: [PATCH 033/312] CLOUDSTACK-5528 : When VR version is not known set requiresUpgrade flag to true Conflicts: server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java server/test/com/cloud/vpc/NetworkACLServiceTest.java --- .../src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java | 3 +++ server/test/com/cloud/vpc/NetworkACLServiceTest.java | 1 + 2 
files changed, 4 insertions(+) diff --git a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java index ae661501ce9..f8838d84c35 100644 --- a/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java @@ -81,6 +81,9 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase Date: Wed, 18 Dec 2013 14:30:26 +0100 Subject: [PATCH 034/312] rats --- .../com/cloud/usage/UsageSanityCheckerIT.java | 37 +++++++++++++------ .../cloud/usage/UsageSanityCheckerTest.java | 22 +++++++++-- usage/test/resources/cloud1.xml | 9 +++++ usage/test/resources/cloud2.xml | 9 +++++ usage/test/resources/cloud3.xml | 9 +++++ usage/test/resources/cloud_usage1.xml | 9 +++++ usage/test/resources/cloud_usage2.xml | 9 +++++ usage/test/resources/cloud_usage3.xml | 9 +++++ 8 files changed, 99 insertions(+), 14 deletions(-) diff --git a/usage/test/com/cloud/usage/UsageSanityCheckerIT.java b/usage/test/com/cloud/usage/UsageSanityCheckerIT.java index 412bebc6106..d8a65800018 100644 --- a/usage/test/com/cloud/usage/UsageSanityCheckerIT.java +++ b/usage/test/com/cloud/usage/UsageSanityCheckerIT.java @@ -1,14 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. package com.cloud.usage; +import static org.junit.Assert.assertEquals; + +import java.io.FileNotFoundException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Properties; + import org.dbunit.DatabaseUnitException; import org.dbunit.dataset.DataSetException; import org.dbunit.dataset.IDataSet; import org.dbunit.dataset.xml.FlatXmlDataSetBuilder; import org.dbunit.ext.mysql.MySqlConnection; import org.dbunit.operation.DatabaseOperation; - -import static org.junit.Assert.assertEquals; - import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -19,14 +42,6 @@ import org.mockito.Mockito; import com.cloud.utils.PropertiesUtil; -import java.io.FileNotFoundException; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Properties; - @RunWith(Parameterized.class) public class UsageSanityCheckerIT{ diff --git a/usage/test/com/cloud/usage/UsageSanityCheckerTest.java b/usage/test/com/cloud/usage/UsageSanityCheckerTest.java index 346a8ad3cdb..69e3ea18acc 100644 --- a/usage/test/com/cloud/usage/UsageSanityCheckerTest.java +++ b/usage/test/com/cloud/usage/UsageSanityCheckerTest.java @@ -1,8 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. package com.cloud.usage; -import org.junit.Test; -import org.mockito.Mockito; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -12,6 +25,9 @@ import java.util.regex.Pattern; import junit.framework.TestCase; +import org.junit.Test; +import org.mockito.Mockito; + public class UsageSanityCheckerTest extends TestCase { @Test diff --git a/usage/test/resources/cloud1.xml b/usage/test/resources/cloud1.xml index e56ed073e01..e148c581e9e 100644 --- a/usage/test/resources/cloud1.xml +++ b/usage/test/resources/cloud1.xml @@ -1,3 +1,12 @@ + diff --git a/usage/test/resources/cloud2.xml b/usage/test/resources/cloud2.xml index e56ed073e01..e148c581e9e 100644 --- a/usage/test/resources/cloud2.xml +++ b/usage/test/resources/cloud2.xml @@ -1,3 +1,12 @@ + diff --git a/usage/test/resources/cloud3.xml b/usage/test/resources/cloud3.xml index 6e7a7453d06..4188c1c2ff1 100644 --- a/usage/test/resources/cloud3.xml +++ b/usage/test/resources/cloud3.xml @@ -1,3 +1,12 @@ + diff --git a/usage/test/resources/cloud_usage1.xml b/usage/test/resources/cloud_usage1.xml index aaad17d3322..20528bf2aab 100644 --- a/usage/test/resources/cloud_usage1.xml +++ b/usage/test/resources/cloud_usage1.xml @@ -1,3 +1,12 @@ + diff --git a/usage/test/resources/cloud_usage2.xml b/usage/test/resources/cloud_usage2.xml index d87d05a8734..7cc3991ed1a 100644 --- a/usage/test/resources/cloud_usage2.xml +++ b/usage/test/resources/cloud_usage2.xml @@ -1,3 +1,12 @@ + diff --git a/usage/test/resources/cloud_usage3.xml b/usage/test/resources/cloud_usage3.xml index 
332ddfc2496..d8922e1f01e 100644 --- a/usage/test/resources/cloud_usage3.xml +++ b/usage/test/resources/cloud_usage3.xml @@ -1,3 +1,12 @@ + From 34174bbcbba6d32d7e4e50aada014f471954d97e Mon Sep 17 00:00:00 2001 From: Brian Federle Date: Wed, 18 Dec 2013 13:09:42 -0800 Subject: [PATCH 035/312] Add missing strings for quiesce VM, SMB fields --- client/WEB-INF/classes/resources/messages.properties | 4 ++++ ui/dictionary.jsp | 4 ++++ ui/scripts/instances.js | 2 +- ui/scripts/storage.js | 2 +- ui/scripts/system.js | 12 ++++++------ ui/scripts/zoneWizard.js | 6 +++--- 6 files changed, 19 insertions(+), 11 deletions(-) diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties index 384a0778bb2..fda4e2d997e 100644 --- a/client/WEB-INF/classes/resources/messages.properties +++ b/client/WEB-INF/classes/resources/messages.properties @@ -14,6 +14,10 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +label.quiesce.vm=Quiesce VM +label.smb.username=SMB Username +label.smb.password=SMB Password +label.smb.domain=SMB Domain label.hypervisors=Hypervisors label.home=Home label.sockets=Sockets diff --git a/ui/dictionary.jsp b/ui/dictionary.jsp index c870f12a90e..8e75969906c 100644 --- a/ui/dictionary.jsp +++ b/ui/dictionary.jsp @@ -25,6 +25,10 @@ under the License. <% long now = System.currentTimeMillis(); %>