mirror of https://github.com/apache/cloudstack.git
Merge pull request #1594 from nvazquez/vmnetworkmapissue
CLOUDSTACK-9407: vm_network_map table doesn't get cleaned up properly

JIRA TICKET: https://issues.apache.org/jira/browse/CLOUDSTACK-9407

### Introduction

It was found that in production environments the number of `vm_network_map` table entries was slowly growing. We investigated how these entries are cleaned up.

### Behaviour

On VM creation, the VM's network mappings are inserted into `vm_network_map`. On VM stop, those mappings are deleted from `vm_network_map` as a result of releasing the VM's NICs.

### Problem

If a VM is stopped from the hypervisor side (at least on vSphere, where we tested it), CloudStack does not clean up `vm_network_map` when it detects that the VM is stopped. Because the cleanup only runs as part of the stop command, the VM's entries in that table are never removed, not even when the VM is eventually destroyed and expunged.

### Proposed solution

We propose moving the `vm_network_map` cleanup from the stop command to the expunge command.

* pr/1594:
  CLOUDSTACK-9407: Refactor
  CLOUDSTACK-9407: Release network resources on expunge command

Signed-off-by: Will Stevens <williamstevens@gmail.com>
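For context, one way to confirm the symptom described above is to count `vm_network_map` rows whose VM has already been removed. The snippet below is a hedged sketch and not part of this change: the JDBC URL and credentials are placeholders, the column names (`vm_network_map.vm_id`, `vm_instance.removed`) are assumptions about the cloud database schema to verify against your deployment, and the MySQL JDBC driver must be on the classpath.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class OrphanedVmNetworkMapCheck {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection settings for the CloudStack "cloud" database;
        // adjust host, user and password for your deployment.
        String url = "jdbc:mysql://localhost:3306/cloud";
        try (Connection conn = DriverManager.getConnection(url, "cloud", "cloud");
             Statement stmt = conn.createStatement();
             // Count vm_network_map rows that point at VMs which were already removed.
             // The joined column names are assumptions about the schema.
             ResultSet rs = stmt.executeQuery(
                     "SELECT COUNT(*) FROM vm_network_map m "
                   + "JOIN vm_instance vm ON vm.id = m.vm_id "
                   + "WHERE vm.removed IS NOT NULL")) {
            if (rs.next()) {
                System.out.println("Orphaned vm_network_map rows: " + rs.getLong(1));
            }
        }
    }
}
```

A non-zero and steadily growing count after VMs have been expunged would match the behaviour reported in the ticket.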
commit 9be93c6e90
```diff
@@ -1428,6 +1428,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         advanceStop(vm, cleanUpEvenIfUnableToStop);
     }
 
+    /**
+     * Send StopCommand to stop vm.<br/>
+     * <strong>Not releasing network resources until expunge command is sent</strong>
+     * @param vm virtual machine
+     * @param cleanUpEvenIfUnableToStop if true -> cleanup even if vm cannot be stopped. if false -> not cleaning up if vm cannot be stopped.
+     * @throws AgentUnavailableException
+     * @throws OperationTimedoutException
+     * @throws ConcurrentOperationException
+     */
     private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnableToStop) throws AgentUnavailableException, OperationTimedoutException,
     ConcurrentOperationException {
         final State state = vm.getState();
@@ -1576,13 +1585,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             s_logger.debug(vm + " is stopped on the host. Proceeding to release resource held.");
         }
 
-        try {
-            _networkMgr.release(profile, cleanUpEvenIfUnableToStop);
-            s_logger.debug("Successfully released network resources for the vm " + vm);
-        } catch (final Exception e) {
-            s_logger.warn("Unable to release some network resources.", e);
-        }
-
         try {
             if (vm.getHypervisorType() != HypervisorType.BareMetal) {
                 volumeMgr.release(profile);
```
```diff
@@ -2046,6 +2046,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
             return false;
         }
         try {
+
+            releaseNetworkResourcesOnExpunge(vm.getId());
+
             List<VolumeVO> rootVol = _volsDao.findByInstanceAndType(vm.getId(), Volume.Type.ROOT);
             // expunge the vm
             _itMgr.advanceExpunge(vm.getUuid());
@@ -2086,6 +2089,23 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
         }
     }
 
+    /**
+     * Release network resources, it was done on vm stop previously.
+     * @param id vm id
+     * @throws ConcurrentOperationException
+     * @throws ResourceUnavailableException
+     */
+    private void releaseNetworkResourcesOnExpunge(long id) throws ConcurrentOperationException, ResourceUnavailableException {
+        final VMInstanceVO vmInstance = _vmDao.findById(id);
+        if (vmInstance != null){
+            final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vmInstance);
+            _networkMgr.release(profile, false);
+        }
+        else {
+            s_logger.error("Couldn't find vm with id = " + id + ", unable to release network resources");
+        }
+    }
+
     private boolean cleanupVmResources(long vmId) {
         boolean success = true;
         // Remove vm from security groups
```
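To make the intent of the change easier to see in isolation, here is a small self-contained sketch (not CloudStack code) of the new ordering: network resources are released at expunge time, immediately before the expunge itself. The interface and method names are illustrative stand-ins for `_networkMgr.release(profile, false)` and `_itMgr.advanceExpunge(vm.getUuid())` from the diff above.

```java
import java.util.ArrayList;
import java.util.List;

// Self-contained illustration (not CloudStack code) of the ordering introduced
// by this change: network resources are released on expunge, right before the
// expunge itself, instead of on stop.
public class ExpungeOrderingSketch {

    // Stand-in for _networkMgr.release(profile, false) in UserVmManagerImpl.
    interface NetworkManager {
        void release(long vmId);
    }

    // Stand-in for _itMgr.advanceExpunge(vm.getUuid()).
    interface InstanceManager {
        void advanceExpunge(long vmId);
    }

    static void expungeVm(long vmId, NetworkManager networkMgr, InstanceManager itMgr) {
        // Releasing here (rather than on stop) means vm_network_map entries are
        // cleaned up even if the VM was powered off out-of-band on the hypervisor.
        networkMgr.release(vmId);
        itMgr.advanceExpunge(vmId);
    }

    public static void main(String[] args) {
        List<String> calls = new ArrayList<>();
        expungeVm(42L, id -> calls.add("release " + id), id -> calls.add("expunge " + id));
        System.out.println(calls); // prints [release 42, expunge 42]
    }
}
```

Tying the release to expunge rather than stop means the `vm_network_map` rows are removed even when the VM was powered off out-of-band on the hypervisor, which is exactly the case described in the JIRA ticket.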