From 51f69f7134ba5f8c52714251258dde5700aa411c Mon Sep 17 00:00:00 2001
From: Abhishek Kumar
Date: Thu, 6 Jan 2022 16:42:57 +0530
Subject: [PATCH 1/3] server: do not return inaccessible entity details to normal users (#5827)

Fixes #5534

Because pre-3.x APIs allow using internal DB IDs, even normal users can
reference resources by their internal IDs. This fix removes the additional
entity details from the error message when the caller does not have access
to the resource.

Signed-off-by: Abhishek Kumar
---
 server/src/main/java/com/cloud/acl/DomainChecker.java | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/server/src/main/java/com/cloud/acl/DomainChecker.java b/server/src/main/java/com/cloud/acl/DomainChecker.java
index aba0d456bfa..355d34fe814 100644
--- a/server/src/main/java/com/cloud/acl/DomainChecker.java
+++ b/server/src/main/java/com/cloud/acl/DomainChecker.java
@@ -178,19 +178,20 @@ public class DomainChecker extends AdapterBase implements SecurityChecker {
         } else {
             if (_accountService.isNormalUser(caller.getId())) {
                 Account account = _accountDao.findById(entity.getAccountId());
+                String errorMessage = String.format("%s does not have permission to operate with resource", caller);
                 if (account != null && account.getType() == Account.ACCOUNT_TYPE_PROJECT) {
                     //only project owner can delete/modify the project
                     if (accessType != null && accessType == AccessType.ModifyProject) {
                         if (!_projectMgr.canModifyProjectAccount(caller, account.getId())) {
-                            throw new PermissionDeniedException(caller + " does not have permission to operate with resource " + entity);
+                            throw new PermissionDeniedException(errorMessage);
                         }
                     } else if (!_projectMgr.canAccessProjectAccount(caller, account.getId())) {
-                        throw new PermissionDeniedException(caller + " does not have permission to operate with resource " + entity);
+                        throw new PermissionDeniedException(errorMessage);
                     }
                     checkOperationPermitted(caller, entity);
                 } else {
                     if (caller.getId() != entity.getAccountId()) {
-                        throw new PermissionDeniedException(caller + " does not have permission to operate with resource " + entity);
+                        throw new PermissionDeniedException(errorMessage);
                     }
                 }
             }
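The change above replaces an entity-revealing error message with a caller-only one. A minimal standalone sketch of the difference, using a made-up class name and sample values (illustration only, not part of the patch):

    public class PermissionMessageExample {
        // Old style: leaks details about the entity the caller may not even be allowed to see.
        static String oldStyleMessage(String caller, String entity) {
            return caller + " does not have permission to operate with resource " + entity;
        }

        // New style (as in the patch): identifies only the caller.
        static String newStyleMessage(String caller) {
            return String.format("%s does not have permission to operate with resource", caller);
        }

        public static void main(String[] args) {
            System.out.println(oldStyleMessage("Acct[user-2]", "VM {id: 42, name: prod-db}"));
            System.out.println(newStyleMessage("Acct[user-2]"));
        }
    }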
From 2774bc156f0bcd8a44e031a54f1b4ff0a72d1620 Mon Sep 17 00:00:00 2001
From: dahn
Date: Thu, 6 Jan 2022 12:48:50 +0100
Subject: [PATCH 2/3] use physical size instead of virtual size for migration (#5750)

* Use Physical size to evaluate if migration is possible
* Improve logging and consider files skipped as failure in complete migration
* skipped can't be negative
* remove useless method
* group multidisk templates for secstor migration
* use enum
* Update engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java

Co-authored-by: sureshanaparti <12028987+sureshanaparti@users.noreply.github.com>
Co-authored-by: Pearl Dsilva
Co-authored-by: Daan Hoogland
Co-authored-by: Pearl d'Silva
---
 .../subsystem/api/storage/DataObject.java     |  2 +
 .../api/storage/SecondaryStorageService.java  |  3 +-
 .../orchestration/DataMigrationUtility.java   | 60 +++++++++++-----
 .../orchestration/StorageOrchestrator.java    | 69 ++++++++++++++-----
 .../image/SecondaryStorageServiceImpl.java    | 13 +++-
 .../storage/image/store/TemplateObject.java   |  9 +++
 .../storage/volume/VolumeObject.java          |  9 +++
 .../diagnostics/to/DiagnosticsDataObject.java |  5 ++
 8 files changed, 130 insertions(+), 40 deletions(-)

diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java
index c57b01ce8b0..091c09d7a4d 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataObject.java
@@ -33,6 +33,8 @@ public interface DataObject {
 
     Long getSize();
 
+    long getPhysicalSize();
+
     DataObjectType getType();
 
     String getUuid();
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SecondaryStorageService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SecondaryStorageService.java
index 07828fda5ce..90a25c9a39e 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SecondaryStorageService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SecondaryStorageService.java
@@ -39,5 +39,6 @@ public interface SecondaryStorageService {
         }
     }
 
-    AsyncCallFuture<DataObjectResult> migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain);
+    AsyncCallFuture<DataObjectResult> migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain,
+            Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain);
 }
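The new migrateData signature threads a template-chain map through the call, next to the existing snapshot-chain map. A standalone sketch of the data shape involved, using simplified stand-in types rather than the real CloudStack interfaces (illustration only):

    import java.util.*;

    public class ChainMapExample {
        // Simplified stand-ins for DataObject/TemplateInfo and CloudStack's Pair.
        interface DataObject { long getPhysicalSize(); }
        record Template(String uuid, long physicalSize) implements DataObject {
            public long getPhysicalSize() { return physicalSize; }
        }
        record Pair<A, B>(A first, B second) {}

        public static void main(String[] args) {
            Template parent = new Template("tmpl-parent", 5L << 30);
            Template child = new Template("tmpl-child", 2L << 30);
            // Maps a parent template to the group of templates that must move together,
            // plus the total physical size of that group.
            Map<DataObject, Pair<List<Template>, Long>> templateChain = new HashMap<>();
            List<Template> group = List.of(parent, child);
            long total = group.stream().mapToLong(Template::getPhysicalSize).sum();
            templateChain.put(parent, new Pair<>(group, total));
            System.out.println("group size in bytes: " + templateChain.get(parent).second());
        }
    }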
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java
index 9d044f3c7da..71b1281decb 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java
@@ -32,9 +32,11 @@ import javax.inject.Inject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.storage.ImageStoreService;
@@ -59,8 +61,11 @@ import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.SecondaryStorageVmVO;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.dao.SecondaryStorageVmDao;
+import org.apache.log4j.Logger;
 
 public class DataMigrationUtility {
+    private static Logger LOGGER = Logger.getLogger(DataMigrationUtility.class);
+
     @Inject
     SecondaryStorageVmDao secStorageVmDao;
     @Inject
@@ -87,19 +92,22 @@
      * the migration is terminated.
      */
     private boolean filesReadyToMigrate(Long srcDataStoreId) {
-        String[] validStates = new String[]{"Ready", "Allocated", "Destroying", "Destroyed", "Failed"};
+        State[] validStates = {State.Ready, State.Allocated, State.Destroying, State.Destroyed, State.Failed};
         boolean isReady = true;
         List<TemplateDataStoreVO> templates = templateDataStoreDao.listByStoreId(srcDataStoreId);
         for (TemplateDataStoreVO template : templates) {
-            isReady &= (Arrays.asList(validStates).contains(template.getState().toString()));
+            isReady &= (Arrays.asList(validStates).contains(template.getState()));
+            LOGGER.trace(String.format("template state: %s", template.getState()));
         }
         List<SnapshotDataStoreVO> snapshots = snapshotDataStoreDao.listByStoreId(srcDataStoreId, DataStoreRole.Image);
         for (SnapshotDataStoreVO snapshot : snapshots) {
-            isReady &= (Arrays.asList(validStates).contains(snapshot.getState().toString()));
+            isReady &= (Arrays.asList(validStates).contains(snapshot.getState()));
+            LOGGER.trace(String.format("snapshot state: %s", snapshot.getState()));
        }
         List<VolumeDataStoreVO> volumes = volumeDataStoreDao.listByStoreId(srcDataStoreId);
         for (VolumeDataStoreVO volume : volumes) {
-            isReady &= (Arrays.asList(validStates).contains(volume.getState().toString()));
+            isReady &= (Arrays.asList(validStates).contains(volume.getState()));
+            LOGGER.trace(String.format("volume state: %s", volume.getState()));
         }
         return isReady;
     }
@@ -113,12 +121,17 @@
         return;
     }
 
-    protected Long getFileSize(DataObject file, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain) {
-        Long size = file.getSize();
+    protected Long getFileSize(DataObject file, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain, Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain) {
+        Long size = file.getPhysicalSize();
         Pair<List<SnapshotInfo>, Long> chain = snapshotChain.get(file);
+        Pair<List<TemplateInfo>, Long> tempChain = templateChain.get(file);
+
         if (file instanceof SnapshotInfo && chain.first() != null && !chain.first().isEmpty()) {
             size = chain.second();
         }
+        if (file instanceof TemplateInfo && tempChain.first() != null && !tempChain.first().isEmpty()) {
+            size = tempChain.second();
+        }
         return size;
     }
@@ -144,9 +157,10 @@
         return new ArrayList<>(temp.keySet());
     }
 
-    protected List<DataObject> getSortedValidSourcesList(DataStore srcDataStore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains) {
+    protected List<DataObject> getSortedValidSourcesList(DataStore srcDataStore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains,
+            Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates) {
         List<DataObject> files = new ArrayList<>();
-        files.addAll(getAllReadyTemplates(srcDataStore));
+        files.addAll(getAllReadyTemplates(srcDataStore, childTemplates));
         files.addAll(getAllReadySnapshotsAndChains(srcDataStore, snapshotChains));
         files.addAll(getAllReadyVolumes(srcDataStore));
@@ -159,8 +173,8 @@
         Collections.sort(files, new Comparator<DataObject>() {
             @Override
             public int compare(DataObject o1, DataObject o2) {
-                Long size1 = o1.getSize();
-                Long size2 = o2.getSize();
+                Long size1 = o1.getPhysicalSize();
+                Long size2 = o2.getPhysicalSize();
                 if (o1 instanceof SnapshotInfo) {
                     size1 = snapshotChains.get(o1).second();
                 }
@@ -173,19 +187,28 @@
         return files;
     }
 
-    protected List<DataObject> getAllReadyTemplates(DataStore srcDataStore) {
-        List<DataObject> files = new LinkedList<>();
+    protected List<DataObject> getAllReadyTemplates(DataStore srcDataStore, Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates) {
+        List<TemplateInfo> files = new LinkedList<>();
         List<TemplateDataStoreVO> templates = templateDataStoreDao.listByStoreId(srcDataStore.getId());
         for (TemplateDataStoreVO template : templates) {
             VMTemplateVO templateVO = templateDao.findById(template.getTemplateId());
             if (template.getState() == ObjectInDataStoreStateMachine.State.Ready && templateVO != null &&
                     (!templateVO.isPublicTemplate() || (templateVO.isPublicTemplate() && templateVO.getUrl() == null)) &&
-                    templateVO.getHypervisorType() != Hypervisor.HypervisorType.Simulator) {
+                    templateVO.getHypervisorType() != Hypervisor.HypervisorType.Simulator && templateVO.getParentTemplateId() == null) {
                 files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore));
             }
         }
-        return files;
+        for (TemplateInfo template: files) {
+            List<VMTemplateVO> children = templateDao.listByParentTemplatetId(template.getId());
+            List<TemplateInfo> temps = new ArrayList<>();
+            temps.add(template);
+            for(VMTemplateVO child : children) {
+                temps.add(templateFactory.getTemplate(child.getId(), srcDataStore));
+            }
+            childTemplates.put(template, new Pair<>(temps, getTotalChainSize(temps)));
+        }
+        return (List<DataObject>) (List<?>) files;
     }
 
     /** Returns parent snapshots and snapshots that do not have any children; snapshotChains comprises of the snapshot chain info
@@ -217,21 +240,20 @@
                     chain.addAll(children);
                 }
             }
-            snapshotChains.put(parent, new Pair<List<SnapshotInfo>, Long>(chain, getSizeForChain(chain)));
+            snapshotChains.put(parent, new Pair<List<SnapshotInfo>, Long>(chain, getTotalChainSize(chain)));
         }
         return (List<DataObject>) (List<?>) files;
     }
 
-    protected Long getSizeForChain(List<SnapshotInfo> chain) {
+    protected Long getTotalChainSize(List<? extends DataObject> chain) {
         Long size = 0L;
-        for (SnapshotInfo snapshot : chain) {
-            size += snapshot.getSize();
+        for (DataObject dataObject : chain) {
+            size += dataObject.getPhysicalSize();
         }
         return size;
     }
 
-
     protected List<DataObject> getAllReadyVolumes(DataStore srcDataStore) {
         List<DataObject> files = new LinkedList<>();
         List<VolumeDataStoreVO> volumes = volumeDataStoreDao.listByStoreId(srcDataStore.getId());
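The sizing logic above uses an object's physical size unless it is the head of a snapshot or template chain, in which case the precomputed chain total applies. A compact standalone sketch of that decision, with simplified types in place of the CloudStack ones (illustration only):

    import java.util.*;

    public class FileSizeExample {
        // A chain groups the head object with everything that must migrate with it.
        record Chain(List<String> members, long totalSize) {}

        // Effective size to reserve on the destination store for one candidate.
        static long effectiveSize(String file, long physicalSize, Map<String, Chain> chains) {
            Chain chain = chains.get(file);
            if (chain != null && chain.members() != null && !chain.members().isEmpty()) {
                return chain.totalSize();   // head of a chain: move the whole group
            }
            return physicalSize;            // standalone object: its own physical size
        }

        public static void main(String[] args) {
            Map<String, Chain> chains = new HashMap<>();
            chains.put("snap-1", new Chain(List.of("snap-1", "snap-1-child"), 7L << 30));
            System.out.println(effectiveSize("snap-1", 3L << 30, chains)); // chain total
            System.out.println(effectiveSize("vol-9", 3L << 30, chains));  // own size
        }
    }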
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java
index 2046adafb1d..01c7f723ea2 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java
@@ -44,6 +44,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService;
 import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
 import org.apache.cloudstack.framework.config.ConfigKey;
 import org.apache.cloudstack.framework.config.Configurable;
@@ -144,10 +145,11 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
         migrationHelper.checkIfCompleteMigrationPossible(migrationPolicy, srcDataStoreId);
         DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image);
         Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains = new HashMap<>();
-        files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains);
+        Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates = new HashMap<>();
+        files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains, childTemplates);
 
         if (files.isEmpty()) {
-            return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy.toString(), true);
+            return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore.getId()), migrationPolicy.toString(), true);
         }
         Map<Long, Pair<Long, Long>> storageCapacities = new Hashtable<>();
         for (Long storeId : destDatastores) {
@@ -155,7 +157,7 @@
         }
         storageCapacities.put(srcDataStoreId, new Pair<>(null, null));
         if (migrationPolicy == MigrationPolicy.COMPLETE) {
-            s_logger.debug("Setting source image store "+srcDatastore.getId()+ " to read-only");
+            s_logger.debug(String.format("Setting source image store: %s to read-only", srcDatastore.getId()));
             storageService.updateImageStoreStatus(srcDataStoreId, true);
         }
 
@@ -172,6 +174,7 @@
             return response;
         }
 
+        int skipped = 0;
         List<Future<AsyncCallFuture<DataObjectResult>>> futures = new ArrayList<>();
         while (true) {
             DataObject chosenFileForMigration = null;
@@ -184,7 +187,7 @@
             Long destDatastoreId = orderedDS.get(0);
 
             if (chosenFileForMigration == null || destDatastoreId == null || (destDatastoreId == srcDatastore.getId() && migrationPolicy == MigrationPolicy.BALANCE) ) {
-                Pair<String, Boolean> result = migrateCompleted(destDatastoreId, srcDatastore, files, migrationPolicy);
+                Pair<String, Boolean> result = migrateCompleted(destDatastoreId, srcDatastore, files, migrationPolicy, skipped);
                 message = result.first();
                 success = result.second();
                 break;
@@ -194,13 +197,14 @@
                 destDatastoreId = orderedDS.get(1);
             }
 
-            if (chosenFileForMigration.getSize() > storageCapacities.get(destDatastoreId).first()) {
-                s_logger.debug("file: " + chosenFileForMigration.getId() + " too large to be migrated to " + destDatastoreId);
+            if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destDatastoreId).first()) {
+                s_logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destDatastoreId));
+                skipped += 1;
                 continue;
             }
 
-            if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, migrationPolicy)) {
-                storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, srcDatastore, destDatastoreId, executor, futures);
+            if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, childTemplates, migrationPolicy)) {
+                storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, srcDatastore, destDatastoreId, executor, futures);
             } else {
                 if (migrationPolicy == MigrationPolicy.BALANCE) {
                     continue;
@@ -215,7 +219,7 @@
         return handleResponse(futures, migrationPolicy, message, success);
     }
 
-    protected Pair<String, Boolean> migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List<DataObject> files, MigrationPolicy migrationPolicy) {
+    protected Pair<String, Boolean> migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List<DataObject> files, MigrationPolicy migrationPolicy, int skipped) {
         String message = "";
         boolean success = true;
         if (destDatastoreId == srcDatastore.getId() && !files.isEmpty()) {
@@ -233,14 +237,27 @@
             }
         } else {
             message = "Migration completed";
+            if (migrationPolicy == MigrationPolicy.COMPLETE && skipped > 0) {
+                message += ". Not all data objects were migrated. Some were probably skipped due to lack of storage capacity.";
+                success = false;
+            }
         }
         return new Pair<String, Boolean>(message, success);
     }
 
-    protected Map<Long, Pair<Long, Long>> migrateAway(DataObject chosenFileForMigration, Map<Long, Pair<Long, Long>> storageCapacities,
-            Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains, DataStore srcDatastore, Long destDatastoreId, ThreadPoolExecutor executor,
-            List<Future<AsyncCallFuture<DataObjectResult>>> futures) {
-        Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains);
+    protected Map<Long, Pair<Long, Long>> migrateAway(
+            DataObject chosenFileForMigration,
+            Map<Long, Pair<Long, Long>> storageCapacities,
+            Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains,
+            Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChains,
+            DataStore srcDatastore,
+            Long destDatastoreId,
+            ThreadPoolExecutor executor,
+            List<Future<AsyncCallFuture<DataObjectResult>>> futures) {
+
+        Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains, templateChains);
+
         storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize);
         long activeSsvms = migrationHelper.activeSSVMCount(srcDatastore);
         long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM;
@@ -254,8 +271,11 @@
         if (chosenFileForMigration instanceof SnapshotInfo ) {
             task.setSnapshotChains(snapshotChains);
         }
+        if (chosenFileForMigration instanceof TemplateInfo) {
+            task.setTemplateChain(templateChains);
+        }
         futures.add((executor.submit(task)));
-        s_logger.debug("Migration of file " + chosenFileForMigration.getId() + " is initiated");
+        s_logger.debug(String.format("Migration of %s: %s is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()));
         return storageCapacities;
     }
 
@@ -374,13 +394,19 @@
      * @param migrationPolicy determines whether a "Balance" or "Complete" migration operation is to be performed
      * @return
      */
-    private boolean shouldMigrate(DataObject chosenFile, Long srcDatastoreId, Long destDatastoreId, Map<Long, Pair<Long, Long>> storageCapacities,
-            Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains, MigrationPolicy migrationPolicy) {
+    private boolean shouldMigrate(
+            DataObject chosenFile,
+            Long srcDatastoreId,
+            Long destDatastoreId,
+            Map<Long, Pair<Long, Long>> storageCapacities,
+            Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains,
+            Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChains,
+            MigrationPolicy migrationPolicy) {
 
         if (migrationPolicy == MigrationPolicy.BALANCE) {
             double meanStdDevCurrent = getStandardDeviation(storageCapacities);
 
-            Long fileSize = migrationHelper.getFileSize(chosenFile, snapshotChains);
+            Long fileSize = migrationHelper.getFileSize(chosenFile, snapshotChains, templateChains);
             Map<Long, Pair<Long, Long>> proposedCapacities = assumeMigrate(storageCapacities, srcDatastoreId, destDatastoreId, fileSize);
             double meanStdDevAfter = getStandardDeviation(proposedCapacities);
 
@@ -426,6 +452,7 @@
         private DataStore srcDataStore;
         private DataStore destDataStore;
         private Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain;
+        private Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain;
         public MigrateDataTask(DataObject file, DataStore srcDataStore, DataStore destDataStore) {
             this.file = file;
             this.srcDataStore = srcDataStore;
@@ -439,13 +466,19 @@
         public Map<DataObject, Pair<List<SnapshotInfo>, Long>> getSnapshotChain() {
             return snapshotChain;
         }
+        public Map<DataObject, Pair<List<TemplateInfo>, Long>> getTemplateChain() {
+            return templateChain;
+        }
+        public void setTemplateChain(Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain) {
+            this.templateChain = templateChain;
+        }
         public DataObject getFile() {
             return file;
         }
 
         @Override
         public AsyncCallFuture<DataObjectResult> call() throws Exception {
-            return secStgSrv.migrateData(file, srcDataStore, destDataStore, snapshotChain);
+            return secStgSrv.migrateData(file, srcDataStore, destDataStore, snapshotChain, templateChain);
         }
     }
 }
diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java
index 523c240c0ee..5b6d806fb09 100644
--- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java
+++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java
@@ -83,7 +83,8 @@
     }
 
     @Override
-    public AsyncCallFuture<DataObjectResult> migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain) {
+    public AsyncCallFuture<DataObjectResult> migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore,
+            Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChain, Map<DataObject, Pair<List<TemplateInfo>, Long>> templateChain) {
         AsyncCallFuture<DataObjectResult> future = new AsyncCallFuture<DataObjectResult>();
         DataObjectResult res = new DataObjectResult(srcDataObject);
         DataObject destDataObject = null;
@@ -114,7 +115,15 @@
                     break;
                 }
             }
-        } else {
+        } else if (srcDataObject instanceof TemplateInfo && templateChain != null && templateChain.containsKey(srcDataObject)) {
+            for (TemplateInfo templateInfo : templateChain.get(srcDataObject).first()) {
+                destDataObject = destDatastore.create(templateInfo);
+                templateInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrateDataRequested);
+                destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrateDataRequested);
+                migrateJob(future, templateInfo, destDataObject, destDatastore);
+            }
+        }
+        else {
             // Check if template in destination store, if yes, do not proceed
             if (srcDataObject instanceof TemplateInfo) {
                 s_logger.debug("Checking if template present at destination");
diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java
index c8b78aa9c91..8f857a2da97 100644
--- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java
+++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java
@@ -141,6 +141,15 @@
         return image.getSize();
     }
 
+    @Override
+    public long getPhysicalSize() {
+        TemplateDataStoreVO templateDataStoreVO = templateStoreDao.findByTemplate(imageVO.getId(), DataStoreRole.Image);
+        if (templateDataStoreVO != null) {
+            return templateDataStoreVO.getPhysicalSize();
+        }
+        return imageVO.getSize();
+    }
+
     @Override
     public DataObjectType getType() {
         return DataObjectType.TEMPLATE;
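TemplateObject.getPhysicalSize() above, and the VolumeObject counterpart below, follow the same pattern: prefer the physical size recorded for the object on the image store and fall back to the virtual size when no store record exists. A standalone sketch of that fallback, with a hypothetical record type standing in for the CloudStack DAOs:

    import java.util.Optional;

    public class PhysicalSizeExample {
        // Hypothetical stand-in for the template/volume store record.
        record StoreRecord(long physicalSize) {}

        static long physicalSize(Optional<StoreRecord> storeRecord, long virtualSize) {
            // Use the on-disk size when the image store knows it; otherwise the virtual size.
            return storeRecord.map(StoreRecord::physicalSize).orElse(virtualSize);
        }

        public static void main(String[] args) {
            System.out.println(physicalSize(Optional.of(new StoreRecord(2_147_483_648L)), 8_589_934_592L));
            System.out.println(physicalSize(Optional.empty(), 8_589_934_592L));
        }
    }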
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
index 705d8120bcd..29405be79be 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
@@ -231,6 +231,15 @@
         return diskOfferingId == null ? null : diskOfferingDao.findById(diskOfferingId);
     }
 
+    @Override
+    public long getPhysicalSize() {
+        VolumeDataStoreVO volumeDataStoreVO = volumeStoreDao.findByVolume(volumeVO.getId());
+        if (volumeDataStoreVO != null) {
+            return volumeDataStoreVO.getPhysicalSize();
+        }
+        return volumeVO.getSize();
+    }
+
     @Override
     public Long getBytesReadRate() {
         return getLongValueFromDiskOfferingVoMethod(DiskOfferingVO::getBytesReadRate);
diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java
index 7736e63a657..ebe1d7fbc73 100644
--- a/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java
+++ b/server/src/main/java/org/apache/cloudstack/diagnostics/to/DiagnosticsDataObject.java
@@ -59,6 +59,11 @@
         return null;
     }
 
+    @Override
+    public long getPhysicalSize() {
+        return 0;
+    }
+
     @Override
     public DataObjectType getType() {
         return dataTO.getObjectType();

From 3cbde8cd6c75477e67866f1a771209c1d16c94f2 Mon Sep 17 00:00:00 2001
From: Hoang Nguyen
Date: Thu, 6 Jan 2022 19:11:54 +0700
Subject: [PATCH 3/3] UI - Hide shrink disk option on XCP-NG/Xenserver (#5829)

* hide shrink disk option on XCP-NG/Xenserver
---
 ui/src/views/storage/ResizeVolume.vue | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ui/src/views/storage/ResizeVolume.vue b/ui/src/views/storage/ResizeVolume.vue
index 8c23fa3606b..6327dca63b6 100644
--- a/ui/src/views/storage/ResizeVolume.vue
+++ b/ui/src/views/storage/ResizeVolume.vue
@@ -48,7 +48,7 @@
           :autoFocus="customDiskOffering || resource.type === 'ROOT'"/>
-
+