mirror of https://github.com/apache/cloudstack.git
Compare commits
4 Commits
5a0c1d22d4...38cd205eec
| Author | SHA1 | Date |
|---|---|---|
| | 38cd205eec | |
| | cd5bb09d0d | |
| | b5e9178078 | |
| | bfbb8bb1ef | |
@@ -969,6 +969,13 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
                     }
 
                     logger.trace("End cleanup expired async-jobs");
+
+                    // 3) Cleanup orphaned networks stuck in Implementing state without async jobs
+                    try {
+                        cleanupOrphanedNetworks();
+                    } catch (Throwable e) {
+                        logger.error("Unexpected exception when trying to cleanup orphaned networks", e);
+                    }
                 } catch (Throwable e) {
                     logger.error("Unexpected exception when trying to execute queue item, ", e);
                 }
@@ -1284,6 +1291,74 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,
         }
     }
 
+    /**
+     * Cleanup networks that are stuck in Implementing state without associated async jobs.
+     * This only processes networks that have been stuck for longer than the job expiration threshold.
+     */
+    private void cleanupOrphanedNetworks() {
+        try {
+            SearchCriteria<NetworkVO> sc = networkDao.createSearchCriteria();
+            sc.addAnd("state", SearchCriteria.Op.EQ, Network.State.Implementing);
+            sc.addAnd("removed", SearchCriteria.Op.NULL);
+            List<NetworkVO> implementingNetworks = networkDao.search(sc, null);
+
+            if (implementingNetworks == null || implementingNetworks.isEmpty()) {
+                return;
+            }
+
+            logger.debug("Found {} networks in Implementing state, checking for orphaned networks", implementingNetworks.size());
+
+            final long expireMinutes = JobExpireMinutes.value();
+            final Date cutoffTime = new Date(System.currentTimeMillis() - (expireMinutes * 60 * 1000));
+
+            for (NetworkVO network : implementingNetworks) {
+                if (network.getCreated().after(cutoffTime)) {
+                    logger.trace("Network {} in Implementing state is only {} minutes old (threshold: {} minutes), skipping cleanup",
+                            network.getId(),
+                            (System.currentTimeMillis() - network.getCreated().getTime()) / 60000,
+                            expireMinutes);
+                    continue;
+                }
+
+                List<AsyncJobVO> jobs = _jobDao.findInstancePendingAsyncJobs("Network", network.getAccountId());
+                boolean hasActiveJob = false;
+                for (AsyncJobVO job : jobs) {
+                    if (job.getInstanceId() != null && job.getInstanceId().equals(network.getId())) {
+                        hasActiveJob = true;
+                        break;
+                    }
+                }
+
+                if (hasActiveJob) {
+                    logger.debug("Network {} in Implementing state has active async job, skipping cleanup", network.getId());
+                    continue;
+                }
+
+                logger.warn("Found orphaned network {} in Implementing state without async job. " +
+                        "Network created: {}, age: {} minutes, expiration threshold: {} minutes. Transitioning to Shutdown state.",
+                        network.getId(), network.getCreated(),
+                        (System.currentTimeMillis() - network.getCreated().getTime()) / 60000,
+                        expireMinutes);
+                updateNetworkState(network);
+
+            }
+        } catch (Exception e) {
+            logger.error("Error while cleaning up orphaned networks", e);
+        }
+    }
+
+    private void updateNetworkState(NetworkVO network) {
+        try {
+            networkOrchestrationService.stateTransitTo(network, Network.Event.OperationFailed);
+            logger.info("Successfully transitioned orphaned network {} to Shutdown state using state machine", network.getId());
+        } catch (final NoTransitionException e) {
+            logger.debug("State transition failed for orphaned network {}, forcing state update", network.getId());
+            network.setState(Network.State.Shutdown);
+            networkDao.update(network.getId(), network);
+            logger.info("Successfully forced orphaned network {} to Shutdown state", network.getId());
+        }
+    }
+
     @Override
     public void onManagementNodeJoined(List<? extends ManagementServerHost> nodeList, long selfNodeId) {
     }
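The new cleanup only reaps networks that have sat in Implementing state longer than the job expiration threshold (JobExpireMinutes) and that have no pending async job. A minimal, self-contained sketch of that age check follows; the 1440-minute threshold and the 90-minute network age are illustrative assumptions, not values taken from this change:

```java
import java.util.Date;

public class OrphanAgeCheckExample {
    public static void main(String[] args) {
        // Mirrors the eligibility check in cleanupOrphanedNetworks(): a network in
        // Implementing state is only considered for cleanup once it is older than
        // the job expiration threshold.
        final long expireMinutes = 1440L;  // assumed job.expire.minutes value, for illustration only
        final Date created = new Date(System.currentTimeMillis() - 90L * 60 * 1000);  // network created 90 minutes ago
        final Date cutoffTime = new Date(System.currentTimeMillis() - expireMinutes * 60 * 1000);

        if (created.after(cutoffTime)) {
            System.out.println("Network is younger than the threshold; skip cleanup");
        } else {
            System.out.println("Network is old enough; check it for pending async jobs");
        }
    }
}
```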
@@ -2527,7 +2527,7 @@
 "label.vnf.app.action.reinstall": "Reinstall VNF Appliance",
 "label.vnf.cidr.list": "CIDR from which access to the VNF appliance's Management interface should be allowed from",
 "label.vnf.cidr.list.tooltip": "the CIDR list to forward traffic from to the VNF management interface. Multiple entries must be separated by a single comma character (,). The default value is 0.0.0.0/0.",
-"label.vnf.configure.management": "Configure Firewall and Port Forwarding rules for VNF's management interfaces",
+"label.vnf.configure.management": "Configure network rules for VNF's management interfaces",
 "label.vnf.configure.management.tooltip": "True by default, security group or network rules (source nat and firewall rules) will be configured for VNF management interfaces. False otherwise. Learn what rules are configured at http://docs.cloudstack.apache.org/en/latest/adminguide/networking/vnf_templates_appliances.html#deploying-vnf-appliances",
 "label.vnf.detail.add": "Add VNF detail",
 "label.vnf.detail.remove": "Remove VNF detail",
@@ -356,7 +356,10 @@ export default {
       permission: ['listVnfAppliances'],
       resourceType: 'UserVm',
       params: () => {
-        return { details: 'servoff,tmpl,nics', isvnf: true }
+        return {
+          details: 'group,nics,secgrp,tmpl,servoff,diskoff,iso,volume,affgrp,backoff,vnfnics',
+          isvnf: true
+        }
       },
       columns: () => {
         const fields = ['name', 'state', 'ipaddress']
@@ -1305,7 +1305,7 @@ export default {
       for (const deviceId of managementDeviceIds) {
         if (this.vnfNicNetworks && this.vnfNicNetworks[deviceId] &&
           ((this.vnfNicNetworks[deviceId].type === 'Isolated' && this.vnfNicNetworks[deviceId].vpcid === undefined) ||
-          (this.vnfNicNetworks[deviceId].type === 'Shared' && this.zone.securitygroupsenabled))) {
+          (this.vnfNicNetworks[deviceId].type === 'Shared' && this.vnfNicNetworks[deviceId].service.filter(svc => svc.name === 'SecurityGroupProvider')))) {
           return true
         }
       }
@@ -120,7 +120,7 @@ export default {
   methods: {
     fetchData () {
       var params = {
-        details: 'servoff,tmpl,nics',
+        details: 'group,nics,secgrp,tmpl,servoff,diskoff,iso,volume,affgrp,backoff,vnfnics',
         isVnf: true,
         listAll: true
       }
@@ -40,9 +40,11 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
 
 import org.apache.cloudstack.utils.security.KeyStoreUtils;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -708,13 +710,31 @@ public class Script implements Callable<String> {
         return executeCommandForExitValue(0, command);
     }
 
+    private static void cleanupProcesses(AtomicReference<List<Process>> processesRef) {
+        List<Process> processes = processesRef.get();
+        if (CollectionUtils.isNotEmpty(processes)) {
+            for (Process process : processes) {
+                if (process == null) {
+                    continue;
+                }
+                LOGGER.trace(String.format("Cleaning up process [%s] from piped commands.", process.pid()));
+                IOUtils.closeQuietly(process.getErrorStream());
+                IOUtils.closeQuietly(process.getOutputStream());
+                IOUtils.closeQuietly(process.getInputStream());
+                process.destroyForcibly();
+            }
+        }
+    }
+
     public static Pair<Integer, String> executePipedCommands(List<String[]> commands, long timeout) {
         if (timeout <= 0) {
             timeout = DEFAULT_TIMEOUT;
         }
+        final AtomicReference<List<Process>> processesRef = new AtomicReference<>();
         Callable<Pair<Integer, String>> commandRunner = () -> {
             List<ProcessBuilder> builders = commands.stream().map(ProcessBuilder::new).collect(Collectors.toList());
             List<Process> processes = ProcessBuilder.startPipeline(builders);
+            processesRef.set(processes);
             Process last = processes.get(processes.size()-1);
             try (BufferedReader reader = new BufferedReader(new InputStreamReader(last.getInputStream()))) {
                 String line;
@@ -741,6 +761,8 @@ public class Script implements Callable<String> {
             result.second(ERR_TIMEOUT);
         } catch (InterruptedException | ExecutionException e) {
             LOGGER.error("Error executing piped commands", e);
+        } finally {
+            cleanupProcesses(processesRef);
         }
         return result;
     }
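The finally block added above runs cleanupProcesses(processesRef) on every exit path of executePipedCommands, including the timeout path that returns ERR_TIMEOUT, so the processes started by ProcessBuilder.startPipeline get their streams closed and are forcibly destroyed. Below is a minimal usage sketch of the helper, not part of this change; it assumes the usual CloudStack locations com.cloud.utils.script.Script and com.cloud.utils.Pair, and that the timeout argument is in milliseconds:

```java
import java.util.Arrays;
import java.util.List;

import com.cloud.utils.Pair;
import com.cloud.utils.script.Script;

public class PipedCommandsExample {
    public static void main(String[] args) {
        // Pipe `ps aux` into `grep java`: the returned pair holds the exit value of the
        // last process in the pipeline and the output read from its stdout.
        List<String[]> commands = Arrays.asList(
                new String[] {"ps", "aux"},
                new String[] {"grep", "java"});
        Pair<Integer, String> result = Script.executePipedCommands(commands, 60_000L);
        if (Integer.valueOf(0).equals(result.first())) {
            System.out.println(result.second());
        }
    }
}
```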