Compare commits

...

8 Commits

Author SHA1 Message Date
Pearl Dsilva 4c75f4f655 Merge 89d66a1c61 into bce3e54a7e 2026-01-22 15:20:28 +01:00
Daman Arora bce3e54a7e improve error handling for template upload notifications (#12412) (Co-authored-by: Daman Arora <daman.arora@shapeblue.com>) 2026-01-22 15:02:46 +01:00
Nicolas Vazquez 6a9835904c Fix for zoneids parameters length on updateAPIs (#12440) 2026-01-22 14:57:46 +01:00
Nicolas Vazquez 6846619a6f Fix update network offering domainids size limitation (#12431) 2026-01-22 14:32:46 +01:00
Vishesh d1eb2822d9 Remove redundant Exceptions from logs for vm schedules (#12428) 2026-01-22 14:29:35 +01:00
Pearl Dsilva 89d66a1c61 tweak logic to consider tags and rule tags at zone and cluster scope 2026-01-16 15:11:01 -05:00
Pearl Dsilva f48cc9cc8a tweak logic to consider tags and rule tags at zone and cluster scope 2026-01-16 15:01:44 -05:00
Pearl Dsilva dc5ba75f6c Consider flexible tags as for detached volume migration 2026-01-16 08:28:04 -05:00
10 changed files with 104 additions and 26 deletions

View File

@@ -78,6 +78,7 @@ public class UpdateNetworkOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.DOMAIN_ID,
type = CommandType.STRING,
length = 4096,
description = "The ID of the containing domain(s) as comma separated string, public for public offerings")
private String domainIds;
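
The only functional change in this hunk (and in the three zoneIds hunks below) appears to be the explicit length = 4096 on the parameter. A rough, hedged capacity check, assuming the @Parameter annotation's default length is 255 and that each ID is a 36-character UUID plus a comma separator:

public class ParamLengthCheck {
    public static void main(String[] args) {
        int perId = 36 + 1; // one UUID plus its comma separator
        // 255 is the assumed default annotation length; verify against the
        // Parameter annotation in the source tree you are building.
        System.out.println("IDs that fit in 255 chars:  " + (255 / perId));  // ~6
        System.out.println("IDs that fit in 4096 chars: " + (4096 / perId)); // ~110
    }
}

So the raised limit moves the comma-separated list from roughly six IDs to around 110.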

View File

@@ -75,6 +75,7 @@ public class UpdateDiskOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@@ -69,6 +69,7 @@ public class UpdateServiceOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@@ -65,6 +65,7 @@ public class UpdateVPCOfferingCmd extends BaseAsyncCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@@ -31,4 +31,6 @@ public interface VMScheduledJobDao extends GenericDao<VMScheduledJobVO, Long> {
int expungeJobsForSchedules(List<Long> scheduleId, Date dateAfter);
int expungeJobsBefore(Date currentTimestamp);
VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp);
}

View File

@@ -39,6 +39,8 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
private final SearchBuilder<VMScheduledJobVO> expungeJobForScheduleSearch;
private final SearchBuilder<VMScheduledJobVO> scheduleAndTimestampSearch;
static final String SCHEDULED_TIMESTAMP = "scheduled_timestamp";
static final String VM_SCHEDULE_ID = "vm_schedule_id";
@@ -58,6 +60,11 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
expungeJobForScheduleSearch.and(VM_SCHEDULE_ID, expungeJobForScheduleSearch.entity().getVmScheduleId(), SearchCriteria.Op.IN);
expungeJobForScheduleSearch.and(SCHEDULED_TIMESTAMP, expungeJobForScheduleSearch.entity().getScheduledTime(), SearchCriteria.Op.GTEQ);
expungeJobForScheduleSearch.done();
scheduleAndTimestampSearch = createSearchBuilder();
scheduleAndTimestampSearch.and(VM_SCHEDULE_ID, scheduleAndTimestampSearch.entity().getVmScheduleId(), SearchCriteria.Op.EQ);
scheduleAndTimestampSearch.and(SCHEDULED_TIMESTAMP, scheduleAndTimestampSearch.entity().getScheduledTime(), SearchCriteria.Op.EQ);
scheduleAndTimestampSearch.done();
}
/**
@@ -92,4 +99,12 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
sc.setParameters(SCHEDULED_TIMESTAMP, date);
return expunge(sc);
}
@Override
public VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp) {
SearchCriteria<VMScheduledJobVO> sc = scheduleAndTimestampSearch.create();
sc.setParameters(VM_SCHEDULE_ID, scheduleId);
sc.setParameters(SCHEDULED_TIMESTAMP, scheduledTimestamp);
return findOneBy(sc);
}
}
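
The new findByScheduleAndTimestamp finder pairs with the VMSchedulerImpl change further down: look a job up by (schedule id, timestamp) and only persist when nothing is found. A minimal, generic sketch of that check-then-persist pattern, using an in-memory map as a stand-in for the DAO (names here are hypothetical, not CloudStack API):

import java.util.AbstractMap.SimpleEntry;
import java.util.Date;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ScheduledJobStore {
    // Stand-in for the vm_scheduled_job table, keyed by (scheduleId, timestamp).
    private final Map<SimpleEntry<Long, Date>, String> jobs = new ConcurrentHashMap<>();

    // Skip the insert when a job already exists for this schedule and timestamp,
    // mirroring findByScheduleAndTimestamp() followed by persist().
    public String scheduleOnce(long scheduleId, Date timestamp, String action) {
        return jobs.computeIfAbsent(new SimpleEntry<>(scheduleId, timestamp),
                k -> action + "@" + timestamp);
    }
}

Note that with a real database the lookup and the persist are still two separate statements, so a unique key on (vm_schedule_id, scheduled_timestamp) would be needed to make this fully safe against concurrent scheduler runs.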

View File

@@ -25,11 +25,11 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TimeZone;
import java.util.UUID;
@@ -663,7 +663,6 @@ import com.cloud.alert.AlertVO;
import com.cloud.alert.dao.AlertDao;
import com.cloud.api.ApiDBUtils;
import com.cloud.api.query.dao.StoragePoolJoinDao;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
@@ -1993,21 +1992,76 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
if (CollectionUtils.isEmpty(allPools)) {
return suitablePools;
}
StoragePoolVO srcPool = _poolDao.findById(volume.getPoolId());
if (srcPool == null) {
logger.warn("Source pool not found for volume {}: {}", volume.getName(), volume.getUuid());
return suitablePools;
}
DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId);
List<String> tags = new ArrayList<>();
String[] tagsArray = diskOffering.getTagsArray();
if (tagsArray != null && tagsArray.length > 0) {
tags = Arrays.asList(tagsArray);
}
Long[] poolIds = allPools.stream().map(StoragePool::getId).toArray(Long[]::new);
List<StoragePoolJoinVO> pools = _poolJoinDao.searchByIds(poolIds);
for (StoragePoolJoinVO storagePool : pools) {
if (StoragePoolStatus.Up.equals(storagePool.getStatus()) &&
(CollectionUtils.isEmpty(tags) || tags.contains(storagePool.getTag()))) {
Optional<? extends StoragePool> match = allPools.stream().filter(x -> x.getId() == storagePool.getId()).findFirst();
match.ifPresent(suitablePools::add);
List<String> tags = (tagsArray != null && tagsArray.length > 0) ? Arrays.asList(tagsArray) : new ArrayList<>();
HypervisorType hypervisorType = getHypervisorType(null, srcPool);
Long dcId = srcPool.getDataCenterId();
logger.debug("Finding suitable pools for detached volume {} with offering tags: {}, hypervisor: {}",
volume.getUuid(), tags, hypervisorType);
Set<Long> matchingPoolIds = new HashSet<>();
List<StoragePoolVO> zonePoolsStandard = _poolDao.findZoneWideStoragePoolsByTags(dcId,
tags.isEmpty() ? null : tags.toArray(new String[0]), true);
for (StoragePoolVO pool : zonePoolsStandard) {
if (pool.getHypervisor() == null || pool.getHypervisor().equals(HypervisorType.Any) ||
pool.getHypervisor().equals(hypervisorType)) {
matchingPoolIds.add(pool.getId());
logger.debug("Found zone-wide pool with standard tags: {} ({})", pool.getName(), pool.getId());
}
}
List<StoragePoolVO> zonePoolsFlexible = _poolJoinDao.findStoragePoolByScopeAndRuleTags(dcId, null, null,
ScopeType.ZONE, tags);
for (StoragePoolVO pool : zonePoolsFlexible) {
StoragePoolVO poolVO = _poolDao.findById(pool.getId());
if (poolVO != null && (poolVO.getHypervisor() == null || poolVO.getHypervisor().equals(HypervisorType.Any) ||
poolVO.getHypervisor().equals(hypervisorType))) {
matchingPoolIds.add(pool.getId());
logger.debug("Found zone-wide pool with flexible tags: {} ({})", pool.getName(), pool.getId());
}
}
List<ClusterVO> clusters = _clusterDao.listByDcHyType(dcId, hypervisorType.toString());
for (ClusterVO cluster : clusters) {
List<StoragePoolVO> clusterPoolsStandard = _poolDao.findPoolsByTags(dcId, cluster.getPodId(),
cluster.getId(), tags.isEmpty() ? null : tags.toArray(new String[0]), true,
VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value());
for (StoragePoolVO pool : clusterPoolsStandard) {
matchingPoolIds.add(pool.getId());
logger.debug("Found cluster-scoped pool with standard tags: {} ({}) in cluster {}",
pool.getName(), pool.getId(), cluster.getName());
}
List<StoragePoolVO> clusterPoolsFlexible = _poolJoinDao.findStoragePoolByScopeAndRuleTags(dcId,
cluster.getPodId(), cluster.getId(), ScopeType.CLUSTER, tags);
for (StoragePoolVO pool : clusterPoolsFlexible) {
matchingPoolIds.add(pool.getId());
logger.debug("Found cluster-scoped pool with flexible tags: {} ({}) in cluster {}",
pool.getName(), pool.getId(), cluster.getName());
}
}
for (StoragePool pool : allPools) {
if (matchingPoolIds.contains(pool.getId()) && StoragePoolStatus.Up.equals(pool.getStatus())) {
suitablePools.add(pool);
logger.debug("Added pool {} to suitable pools", pool.getName());
}
}
logger.debug("Found {} suitable pools out of {} total pools for detached volume {}",
suitablePools.size(), allPools.size(), volume.getUuid());
return suitablePools;
}
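
Condensed, the rewritten block above treats a pool as suitable for a detached volume when it lives in the source pool's zone (either zone-wide or in a cluster whose hypervisor matches the source pool), when its static tags or its flexible rule tags satisfy the disk offering's tags, and when it is Up. A simplified, hedged restatement of that predicate; the types and helpers below are illustrative stand-ins, not CloudStack classes:

import java.util.List;
import java.util.Set;

// Illustrative stand-ins only, not CloudStack types.
record Pool(String hypervisor, Set<String> staticTags, boolean ruleTagsMatch, boolean up) {}

class DetachedVolumePoolFilter {
    static boolean isCandidate(Pool pool, List<String> offeringTags, String volumeHypervisor) {
        boolean hypervisorOk = pool.hypervisor() == null
                || "Any".equals(pool.hypervisor())
                || pool.hypervisor().equals(volumeHypervisor);
        boolean tagsOk = offeringTags.isEmpty()
                || pool.staticTags().containsAll(offeringTags)  // standard tag match
                || pool.ruleTagsMatch();                        // flexible (rule) tag match
        return hypervisorOk && tagsOk && pool.up();
    }
}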

View File

@@ -162,7 +162,13 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu
}
Date scheduledDateTime = Date.from(ts.toInstant());
VMScheduledJobVO scheduledJob = new VMScheduledJobVO(vmSchedule.getVmId(), vmSchedule.getId(), vmSchedule.getAction(), scheduledDateTime);
VMScheduledJobVO scheduledJob = vmScheduledJobDao.findByScheduleAndTimestamp(vmSchedule.getId(), scheduledDateTime);
if (scheduledJob != null) {
logger.trace("Job is already scheduled for schedule {} at {}", vmSchedule, scheduledDateTime);
return scheduledDateTime;
}
scheduledJob = new VMScheduledJobVO(vmSchedule.getVmId(), vmSchedule.getId(), vmSchedule.getAction(), scheduledDateTime);
try {
vmScheduledJobDao.persist(scheduledJob);
ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, vm.getAccountId(), actionEventMap.get(vmSchedule.getAction()),

View File

@@ -218,18 +218,19 @@ export const notifierPlugin = {
if (error.response.status) {
msg = `${i18n.global.t('message.request.failed')} (${error.response.status})`
}
if (error.message) {
desc = error.message
}
if (error.response.headers && 'x-description' in error.response.headers) {
if (error.response.headers?.['x-description']) {
desc = error.response.headers['x-description']
}
if (desc === '' && error.response.data) {
} else if (error.response.data) {
const responseKey = _.findKey(error.response.data, 'errortext')
if (responseKey) {
desc = error.response.data[responseKey].errortext
} else if (typeof error.response.data === 'string') {
desc = error.response.data
}
}
if (!desc && error.message) {
desc = error.message
}
}
let countNotify = store.getters.countNotify
countNotify++

View File

@@ -638,11 +638,7 @@ export default {
this.$emit('refresh-data')
this.closeAction()
}).catch(e => {
this.$notification.error({
message: this.$t('message.upload.failed'),
description: `${this.$t('message.upload.template.failed.description')} - ${e}`,
duration: 0
})
this.$notifyError(e)
})
},
fetchCustomHypervisorName () {