mirror of https://github.com/apache/cloudstack.git
Compare commits
6 Commits
38cd205eec ... 05ba896d8e
| Author | SHA1 | Date |
|---|---|---|
| | 05ba896d8e | |
| | bce3e54a7e | |
| | 6a9835904c | |
| | 6846619a6f | |
| | d1eb2822d9 | |
| | bfbb8bb1ef | |
@@ -78,6 +78,7 @@ public class UpdateNetworkOfferingCmd extends BaseCmd {

    @Parameter(name = ApiConstants.DOMAIN_ID,
               type = CommandType.STRING,
               length = 4096,
               description = "The ID of the containing domain(s) as comma separated string, public for public offerings")
    private String domainIds;
@@ -75,6 +75,7 @@ public class UpdateDiskOfferingCmd extends BaseCmd {

    @Parameter(name = ApiConstants.ZONE_ID,
               type = CommandType.STRING,
               description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
               length = 4096,
               since = "4.13")
    private String zoneIds;
@@ -69,6 +69,7 @@ public class UpdateServiceOfferingCmd extends BaseCmd {

    @Parameter(name = ApiConstants.ZONE_ID,
               type = CommandType.STRING,
               description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
               length = 4096,
               since = "4.13")
    private String zoneIds;
@@ -65,6 +65,7 @@ public class UpdateVPCOfferingCmd extends BaseAsyncCmd {

    @Parameter(name = ApiConstants.ZONE_ID,
               type = CommandType.STRING,
               description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
               length = 4096,
               since = "4.13")
    private String zoneIds;
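The four hunks above add the same kind of API parameter: a comma-separated list of IDs whose maximum length is 4096 characters, with a keyword ("public" or "all") standing in for "no specific IDs". The sketch below is a hypothetical illustration of how such a string might be split on the server side; the class and method names are invented for this example and are not part of the patch.

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Hypothetical helper, not part of the patch: splits a comma-separated ID string
// such as the domainIds/zoneIds parameters above, treating the documented keyword
// ("public" or "all") as "no specific IDs".
public final class CommaSeparatedIds {
    public static List<String> parse(String raw, String allKeyword) {
        if (raw == null || raw.isBlank() || raw.trim().equalsIgnoreCase(allKeyword)) {
            return Collections.emptyList(); // empty list stands for "applies everywhere"
        }
        return Arrays.stream(raw.split(","))
                .map(String::trim)
                .filter(s -> !s.isEmpty())
                .toList();
    }
}
```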
@@ -31,4 +31,6 @@ public interface VMScheduledJobDao extends GenericDao<VMScheduledJobVO, Long> {

    int expungeJobsForSchedules(List<Long> scheduleId, Date dateAfter);

    int expungeJobsBefore(Date currentTimestamp);

    VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp);
}
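The new `findByScheduleAndTimestamp` finder lets a caller check whether a job already exists for a given schedule and occurrence before persisting another one; the VMSchedulerImpl hunk further down uses it exactly this way. A minimal caller-side sketch, assuming a `vmScheduledJobDao` field and locally defined `vmId`, `scheduleId`, `action`, and `timestamp` values:

```java
// Sketch of the intended usage (mirrors the VMSchedulerImpl change below):
// skip persisting when a job for this schedule/occurrence already exists,
// so repeated scheduling passes stay idempotent.
VMScheduledJobVO existing = vmScheduledJobDao.findByScheduleAndTimestamp(scheduleId, timestamp);
if (existing == null) {
    vmScheduledJobDao.persist(new VMScheduledJobVO(vmId, scheduleId, action, timestamp));
}
```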
@@ -39,6 +39,8 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long

    private final SearchBuilder<VMScheduledJobVO> expungeJobForScheduleSearch;

    private final SearchBuilder<VMScheduledJobVO> scheduleAndTimestampSearch;

    static final String SCHEDULED_TIMESTAMP = "scheduled_timestamp";

    static final String VM_SCHEDULE_ID = "vm_schedule_id";
@@ -58,6 +60,11 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long

        expungeJobForScheduleSearch.and(VM_SCHEDULE_ID, expungeJobForScheduleSearch.entity().getVmScheduleId(), SearchCriteria.Op.IN);
        expungeJobForScheduleSearch.and(SCHEDULED_TIMESTAMP, expungeJobForScheduleSearch.entity().getScheduledTime(), SearchCriteria.Op.GTEQ);
        expungeJobForScheduleSearch.done();

        scheduleAndTimestampSearch = createSearchBuilder();
        scheduleAndTimestampSearch.and(VM_SCHEDULE_ID, scheduleAndTimestampSearch.entity().getVmScheduleId(), SearchCriteria.Op.EQ);
        scheduleAndTimestampSearch.and(SCHEDULED_TIMESTAMP, scheduleAndTimestampSearch.entity().getScheduledTime(), SearchCriteria.Op.EQ);
        scheduleAndTimestampSearch.done();
    }

    /**
@@ -92,4 +99,12 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long

        sc.setParameters(SCHEDULED_TIMESTAMP, date);
        return expunge(sc);
    }

    @Override
    public VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp) {
        SearchCriteria<VMScheduledJobVO> sc = scheduleAndTimestampSearch.create();
        sc.setParameters(VM_SCHEDULE_ID, scheduleId);
        sc.setParameters(SCHEDULED_TIMESTAMP, scheduledTimestamp);
        return findOneBy(sc);
    }
}
@@ -969,6 +969,13 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,

            }

            logger.trace("End cleanup expired async-jobs");

            // 3) Cleanup orphaned networks stuck in Implementing state without async jobs
            try {
                cleanupOrphanedNetworks();
            } catch (Throwable e) {
                logger.error("Unexpected exception when trying to cleanup orphaned networks", e);
            }
        } catch (Throwable e) {
            logger.error("Unexpected exception when trying to execute queue item, ", e);
        }
@@ -1284,6 +1291,74 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager,

        }
    }

    /**
     * Cleanup networks that are stuck in Implementing state without associated async jobs.
     * This only processes networks that have been stuck for longer than the job expiration threshold.
     */
    private void cleanupOrphanedNetworks() {
        try {
            SearchCriteria<NetworkVO> sc = networkDao.createSearchCriteria();
            sc.addAnd("state", SearchCriteria.Op.EQ, Network.State.Implementing);
            sc.addAnd("removed", SearchCriteria.Op.NULL);
            List<NetworkVO> implementingNetworks = networkDao.search(sc, null);

            if (implementingNetworks == null || implementingNetworks.isEmpty()) {
                return;
            }

            logger.debug("Found {} networks in Implementing state, checking for orphaned networks", implementingNetworks.size());

            final long expireMinutes = JobExpireMinutes.value();
            final Date cutoffTime = new Date(System.currentTimeMillis() - (expireMinutes * 60 * 1000));

            for (NetworkVO network : implementingNetworks) {
                if (network.getCreated().after(cutoffTime)) {
                    logger.trace("Network {} in Implementing state is only {} minutes old (threshold: {} minutes), skipping cleanup",
                            network.getId(),
                            (System.currentTimeMillis() - network.getCreated().getTime()) / 60000,
                            expireMinutes);
                    continue;
                }

                List<AsyncJobVO> jobs = _jobDao.findInstancePendingAsyncJobs("Network", network.getAccountId());
                boolean hasActiveJob = false;
                for (AsyncJobVO job : jobs) {
                    if (job.getInstanceId() != null && job.getInstanceId().equals(network.getId())) {
                        hasActiveJob = true;
                        break;
                    }
                }

                if (hasActiveJob) {
                    logger.debug("Network {} in Implementing state has active async job, skipping cleanup", network.getId());
                    continue;
                }

                logger.warn("Found orphaned network {} in Implementing state without async job. " +
                        "Network created: {}, age: {} minutes, expiration threshold: {} minutes. Transitioning to Shutdown state.",
                        network.getId(), network.getCreated(),
                        (System.currentTimeMillis() - network.getCreated().getTime()) / 60000,
                        expireMinutes);
                updateNetworkState(network);
            }
        } catch (Exception e) {
            logger.error("Error while cleaning up orphaned networks", e);
        }
    }

    private void updateNetworkState(NetworkVO network) {
        try {
            networkOrchestrationService.stateTransitTo(network, Network.Event.OperationFailed);
            logger.info("Successfully transitioned orphaned network {} to Shutdown state using state machine", network.getId());
        } catch (final NoTransitionException e) {
            logger.debug("State transition failed for orphaned network {}, forcing state update", network.getId());
            network.setState(Network.State.Shutdown);
            networkDao.update(network.getId(), network);
            logger.info("Successfully forced orphaned network {} to Shutdown state", network.getId());
        }
    }

    @Override
    public void onManagementNodeJoined(List<? extends ManagementServerHost> nodeList, long selfNodeId) {
    }
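As the javadoc above states, a network only qualifies for cleanup once it has been in Implementing state longer than the async-job expiration threshold. A small self-contained illustration of that cutoff arithmetic; the 1440-minute value is an assumed example, not a value taken from this patch:

```java
import java.util.Date;

public class CutoffExample {
    public static void main(String[] args) {
        long expireMinutes = 1440L; // assumed example value for JobExpireMinutes
        Date cutoffTime = new Date(System.currentTimeMillis() - expireMinutes * 60 * 1000);

        Date createdRecently = new Date(System.currentTimeMillis() - 30L * 60 * 1000);
        Date createdLongAgo = new Date(System.currentTimeMillis() - 2000L * 60 * 1000);

        // Mirrors the check in cleanupOrphanedNetworks(): created-after-cutoff means "too young, skip".
        System.out.println("30-minute-old network skipped:   " + createdRecently.after(cutoffTime)); // true
        System.out.println("2000-minute-old network skipped: " + createdLongAgo.after(cutoffTime));  // false
    }
}
```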
@@ -162,7 +162,13 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu
         }

         Date scheduledDateTime = Date.from(ts.toInstant());
-        VMScheduledJobVO scheduledJob = new VMScheduledJobVO(vmSchedule.getVmId(), vmSchedule.getId(), vmSchedule.getAction(), scheduledDateTime);
+        VMScheduledJobVO scheduledJob = vmScheduledJobDao.findByScheduleAndTimestamp(vmSchedule.getId(), scheduledDateTime);
+        if (scheduledJob != null) {
+            logger.trace("Job is already scheduled for schedule {} at {}", vmSchedule, scheduledDateTime);
+            return scheduledDateTime;
+        }
+
+        scheduledJob = new VMScheduledJobVO(vmSchedule.getVmId(), vmSchedule.getId(), vmSchedule.getAction(), scheduledDateTime);
         try {
             vmScheduledJobDao.persist(scheduledJob);
             ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, vm.getAccountId(), actionEventMap.get(vmSchedule.getAction()),
@@ -218,18 +218,19 @@ export const notifierPlugin = {
         if (error.response.status) {
           msg = `${i18n.global.t('message.request.failed')} (${error.response.status})`
         }
-        if (error.message) {
-          desc = error.message
-        }
-        if (error.response.headers && 'x-description' in error.response.headers) {
+        if (error.response.headers?.['x-description']) {
           desc = error.response.headers['x-description']
-        }
-        if (desc === '' && error.response.data) {
+        } else if (error.response.data) {
           const responseKey = _.findKey(error.response.data, 'errortext')
           if (responseKey) {
             desc = error.response.data[responseKey].errortext
+          } else if (typeof error.response.data === 'string') {
+            desc = error.response.data
           }
         }
+        if (!desc && error.message) {
+          desc = error.message
+        }
       }
       let countNotify = store.getters.countNotify
       countNotify++
@@ -638,11 +638,7 @@ export default {
         this.$emit('refresh-data')
         this.closeAction()
       }).catch(e => {
-        this.$notification.error({
-          message: this.$t('message.upload.failed'),
-          description: `${this.$t('message.upload.template.failed.description')} - ${e}`,
-          duration: 0
-        })
+        this.$notifyError(e)
       })
     },
     fetchCustomHypervisorName () {