From a4b1a27c7d2d93a1da698a7115900b3205c1077e Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Mon, 19 Jan 2026 13:20:07 +0530 Subject: [PATCH 001/126] ui: fix 404 on login after forgot password (#12448) Signed-off-by: Abhishek Kumar --- ui/src/views/auth/ForgotPassword.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/src/views/auth/ForgotPassword.vue b/ui/src/views/auth/ForgotPassword.vue index 87f2d1d0c33..1e817e01a6e 100644 --- a/ui/src/views/auth/ForgotPassword.vue +++ b/ui/src/views/auth/ForgotPassword.vue @@ -162,7 +162,7 @@ export default { api('forgotPassword', {}, 'POST', loginParams) .finally(() => { this.$message.success(this.$t('message.forgot.password.success')) - this.$router.push({ path: '/login' }).catch(() => {}) + this.$router.replace({ path: '/user/login' }) }) }).catch(error => { this.formRef.value.scrollToField(error.errorFields[0].name) From 42f1e19362ab9030117dc3808dbf3854b6a9f92d Mon Sep 17 00:00:00 2001 From: Manoj Kumar Date: Mon, 19 Jan 2026 18:50:18 +0530 Subject: [PATCH 002/126] Mask vncPasswd being logged in agent.log (#12404) --- .../wrapper/LibvirtMigrateCommandWrapper.java | 24 ++++++++++------- .../wrapper/LibvirtStartCommandWrapper.java | 5 ++-- .../LibvirtMigrateCommandWrapperTest.java | 26 ++++++++++++++++++- 3 files changed, 42 insertions(+), 13 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java index 32f2a4b122c..1f14402c85e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java @@ -158,7 +158,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper dpdkPortsMapping = command.getDpdkInterfaceMapping(); if (MapUtils.isNotEmpty(dpdkPortsMapping)) { if (logger.isTraceEnabled()) { - logger.trace(String.format("Changing VM [%s] DPDK interfaces during migration to host: [%s].", vmName, target)); + logger.trace("Changing VM {} DPDK interfaces during migration to host: {}.", vmName, target); } xmlDesc = replaceDpdkInterfaces(xmlDesc, dpdkPortsMapping); if (logger.isDebugEnabled()) { - logger.debug(String.format("Changed VM [%s] XML configuration of DPDK interfaces. New XML configuration is [%s].", vmName, xmlDesc)); + logger.debug("Changed VM {} XML configuration of DPDK interfaces. 
New XML configuration is {}.", vmName, maskSensitiveInfoInXML(xmlDesc)); } } @@ -233,7 +233,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper]*type=['\"]vnc['\"][^>]*passwd=['\"])([^'\"]*)(['\"])", + "$1*****$3"); + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java index a174c9a6f14..6e978715755 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java @@ -80,8 +80,9 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper"; + String expected2 = ""; + assertEquals(expected2, LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML(xml2)); + + // Test case 3: Non-VNC graphics (should remain unchanged) + String xml3 = ""; + assertEquals(xml3, LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML(xml3)); + + // Test case 4: Multiple VNC entries in one string + String xml4 = "\n" + + ""; + String expected4 = "\n" + + ""; + assertEquals(expected4, LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML(xml4)); + } } From 2a6ce0c8a810e908139f06d6e12d365801af4422 Mon Sep 17 00:00:00 2001 From: Vitor Hugo Homem Marzarotto <59698484+vits-hugs@users.noreply.github.com> Date: Tue, 20 Jan 2026 04:10:42 -0300 Subject: [PATCH 003/126] Adds url kubernetes iso (#10862) Co-authored-by: Vitor Hugo Homem Marzarotto Co-authored-by: Henrique Sato --- .../apache/cloudstack/api/ApiConstants.java | 1 + .../version/KubernetesVersionManagerImpl.java | 19 ++-- .../KubernetesSupportedVersionResponse.java | 12 +++ .../KubernetesVersionManagerImplTest.java | 6 +- .../version/KubernetesVersionServiceTest.java | 90 +++++++++++++++---- ui/public/locales/en.json | 1 + ui/public/locales/pt_BR.json | 1 + ui/src/config/section/image.js | 6 +- 8 files changed, 105 insertions(+), 31 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 4abc0d13d74..daf1bdc705d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -1097,6 +1097,7 @@ public class ApiConstants { public static final String DOCKER_REGISTRY_EMAIL = "dockerregistryemail"; public static final String ISO_NAME = "isoname"; public static final String ISO_STATE = "isostate"; + public static final String ISO_URL = "isourl"; public static final String SEMANTIC_VERSION = "semanticversion"; public static final String KUBERNETES_VERSION_ID = "kubernetesversionid"; public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname"; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 99c9a4de051..7b126b2fba0 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -53,6 +53,7 @@ import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateZoneDao; import 
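As an editor's aside to the vncPasswd-masking change in PATCH 002/126 above: the exact regular expression added in LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML() is partly garbled in this excerpt (the element name inside the pattern is missing), so the small standalone sketch below only approximates it. The class name, sample domain XML and pattern are illustrative assumptions, not the committed code.

import java.util.regex.Pattern;

public class VncPasswdMaskDemo {
    // Approximation of the pattern used by maskSensitiveInfoInXML(); the committed regex may differ.
    private static final Pattern VNC_PASSWD = Pattern.compile(
            "(<graphics[^>]*type=['\"]vnc['\"][^>]*passwd=['\"])([^'\"]*)(['\"])");

    static String mask(String xml) {
        return xml == null ? null : VNC_PASSWD.matcher(xml).replaceAll("$1*****$3");
    }

    public static void main(String[] args) {
        String domainXml = "<graphics type='vnc' port='5900' autoport='yes' passwd='s3cret' listen='0.0.0.0'/>";
        // Prints the same element with the password value replaced by *****
        System.out.println(mask(domainXml));
    }
}

The design point of the patch is the same as in this sketch: mask the domain XML before it reaches agent.log, not after.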
com.cloud.template.TemplateApiService; import com.cloud.template.VirtualMachineTemplate; +import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; @@ -85,7 +86,7 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne public static final String MINIMUN_AUTOSCALER_SUPPORTED_VERSION = "1.15.0"; protected void updateTemplateDetailsInKubernetesSupportedVersionResponse( - final KubernetesSupportedVersion kubernetesSupportedVersion, KubernetesSupportedVersionResponse response) { + final KubernetesSupportedVersion kubernetesSupportedVersion, KubernetesSupportedVersionResponse response, boolean isRootAdmin) { TemplateJoinVO template = templateJoinDao.findById(kubernetesSupportedVersion.getIsoId()); if (template == null) { return; @@ -95,11 +96,14 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne if (template.getState() != null) { response.setIsoState(template.getState().toString()); } + if (isRootAdmin) { + response.setIsoUrl(template.getUrl()); + } response.setIsoArch(template.getArch().getType()); response.setDirectDownload(template.isDirectDownload()); } - private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion) { + private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion, boolean isRootAdmin) { KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse(); response.setObjectName("kubernetessupportedversion"); response.setId(kubernetesSupportedVersion.getUuid()); @@ -118,7 +122,7 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne response.setSupportsHA(compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0); response.setSupportsAutoscaling(versionSupportsAutoscaling(kubernetesSupportedVersion)); - updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion, response); + updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion, response, isRootAdmin); response.setCreated(kubernetesSupportedVersion.getCreated()); return response; } @@ -126,8 +130,11 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne private ListResponse createKubernetesSupportedVersionListResponse( List versions, Integer count) { List responseList = new ArrayList<>(); + Account caller = CallContext.current().getCallingAccount(); + boolean isRootAdmin = accountManager.isRootAdmin(caller.getId()); + for (KubernetesSupportedVersionVO version : versions) { - responseList.add(createKubernetesSupportedVersionResponse(version)); + responseList.add(createKubernetesSupportedVersionResponse(version, isRootAdmin)); } ListResponse response = new ListResponse<>(); response.setResponses(responseList, count); @@ -374,7 +381,7 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne supportedVersionVO = kubernetesSupportedVersionDao.persist(supportedVersionVO); CallContext.current().putContextParameter(KubernetesSupportedVersion.class, supportedVersionVO.getUuid()); - return createKubernetesSupportedVersionResponse(supportedVersionVO); + return createKubernetesSupportedVersionResponse(supportedVersionVO, true); } @Override @@ -435,7 +442,7 @@ public class 
KubernetesVersionManagerImpl extends ManagerBase implements Kuberne } version = kubernetesSupportedVersionDao.findById(versionId); } - return createKubernetesSupportedVersionResponse(version); + return createKubernetesSupportedVersionResponse(version, true); } @Override diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java index cfa3212e409..f6e1ee85944 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java @@ -50,6 +50,10 @@ public class KubernetesSupportedVersionResponse extends BaseResponse { @Param(description = "The name of the binaries ISO for Kubernetes supported version") private String isoName; + @SerializedName(ApiConstants.ISO_URL) + @Param(description = "the URL of the binaries ISO for Kubernetes supported version") + private String isoUrl; + @SerializedName(ApiConstants.ISO_STATE) @Param(description = "The state of the binaries ISO for Kubernetes supported version") private String isoState; @@ -134,6 +138,14 @@ public class KubernetesSupportedVersionResponse extends BaseResponse { this.isoName = isoName; } + public String getIsoUrl() { + return isoUrl; + } + + public void setIsoUrl(String isoUrl) { + this.isoUrl = isoUrl; + } + public String getIsoState() { return isoState; } diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionManagerImplTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionManagerImplTest.java index f827610c3cb..bbec555e8e5 100644 --- a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionManagerImplTest.java +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionManagerImplTest.java @@ -48,7 +48,7 @@ public class KubernetesVersionManagerImplTest { Mockito.when(kubernetesSupportedVersion.getIsoId()).thenReturn(1L); KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse(); kubernetesVersionManager.updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion, - response); + response, true); Assert.assertNull(ReflectionTestUtils.getField(response, "isoId")); } @@ -63,13 +63,13 @@ public class KubernetesVersionManagerImplTest { Mockito.when(templateJoinVO.getUuid()).thenReturn(uuid); Mockito.when(templateJoinDao.findById(1L)).thenReturn(templateJoinVO); kubernetesVersionManager.updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion, - response); + response, true); Assert.assertEquals(uuid, ReflectionTestUtils.getField(response, "isoId")); Assert.assertNull(ReflectionTestUtils.getField(response, "isoState")); ObjectInDataStoreStateMachine.State state = ObjectInDataStoreStateMachine.State.Ready; Mockito.when(templateJoinVO.getState()).thenReturn(state); kubernetesVersionManager.updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion, - response); + response, true); Assert.assertEquals(state.toString(), ReflectionTestUtils.getField(response, "isoState")); } } diff --git 
a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java index 455df6b57d4..b874a9a0ffa 100644 --- a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java @@ -17,6 +17,9 @@ package com.cloud.kubernetes.version; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertNotNull; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.when; import java.lang.reflect.Field; @@ -25,6 +28,11 @@ import java.util.List; import java.util.UUID; import com.cloud.cpu.CPU; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.User; +import com.cloud.user.UserVO; import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd; import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd; import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd; @@ -63,11 +71,6 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.template.TemplateApiService; import com.cloud.template.VirtualMachineTemplate; -import com.cloud.user.Account; -import com.cloud.user.AccountManager; -import com.cloud.user.AccountVO; -import com.cloud.user.User; -import com.cloud.user.UserVO; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.Filter; @@ -95,6 +98,8 @@ public class KubernetesVersionServiceTest { private DataCenterDao dataCenterDao; @Mock private TemplateApiService templateService; + @Mock + private Account accountMock; AutoCloseable closeable; @@ -124,6 +129,7 @@ public class KubernetesVersionServiceTest { when(dataCenterDao.findById(Mockito.anyLong())).thenReturn(zone); TemplateJoinVO templateJoinVO = Mockito.mock(TemplateJoinVO.class); + when(templateJoinVO.getUrl()).thenReturn("https://download.cloudstack.com"); when(templateJoinVO.getState()).thenReturn(ObjectInDataStoreStateMachine.State.Ready); when(templateJoinVO.getArch()).thenReturn(CPU.CPUArch.getDefault()); when(templateJoinDao.findById(Mockito.anyLong())).thenReturn(templateJoinVO); @@ -140,19 +146,66 @@ public class KubernetesVersionServiceTest { @Test public void listKubernetesSupportedVersionsTest() { - ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class); - List versionVOs = new ArrayList<>(); - KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class); - when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); - versionVOs.add(versionVO); - when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO); - when(kubernetesSupportedVersionDao.searchAndCount(Mockito.any(SearchCriteria.class), - Mockito.any(Filter.class))).thenReturn(new Pair<>(versionVOs, versionVOs.size())); - ListResponse versionsResponse = - kubernetesVersionService.listKubernetesSupportedVersions(cmd); - Assert.assertEquals(versionVOs.size(), versionsResponse.getCount().intValue()); - 
Assert.assertTrue(CollectionUtils.isNotEmpty(versionsResponse.getResponses())); - Assert.assertEquals(versionVOs.size(), versionsResponse.getResponses().size()); + CallContext callContextMock = Mockito.mock(CallContext.class); + try (MockedStatic callContextMockedStatic = Mockito.mockStatic(CallContext.class)) { + callContextMockedStatic.when(CallContext::current).thenReturn(callContextMock); + final SearchCriteria versionSearchCriteria = Mockito.mock(SearchCriteria.class); + when(callContextMock.getCallingAccount()).thenReturn(accountMock); + ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class); + List versionVOs = new ArrayList<>(); + KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class); + when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + versionVOs.add(versionVO); + when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO); + when(kubernetesSupportedVersionDao.searchAndCount(Mockito.any(), Mockito.any(Filter.class))) + .thenReturn(new Pair<>(versionVOs, versionVOs.size())); + ListResponse versionsResponse = + kubernetesVersionService.listKubernetesSupportedVersions(cmd); + Assert.assertEquals(versionVOs.size(), versionsResponse.getCount().intValue()); + Assert.assertTrue(CollectionUtils.isNotEmpty(versionsResponse.getResponses())); + Assert.assertEquals(versionVOs.size(), versionsResponse.getResponses().size()); + } + } + + @Test + public void listKubernetesSupportedVersionsTestWhenAdmin() { + CallContext callContextMock = Mockito.mock(CallContext.class); + try (MockedStatic callContextMockedStatic = Mockito.mockStatic(CallContext.class)) { + callContextMockedStatic.when(CallContext::current).thenReturn(callContextMock); + ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class); + List versionVOs = new ArrayList<>(); + KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class); + when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + versionVOs.add(versionVO); + when(callContextMock.getCallingAccount()).thenReturn(accountMock); + when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO); + when(kubernetesSupportedVersionDao.searchAndCount(Mockito.any(), Mockito.any(Filter.class))) + .thenReturn(new Pair<>(versionVOs, versionVOs.size())); + when(accountManager.isRootAdmin(anyLong())).thenReturn(true); + ListResponse response = kubernetesVersionService.listKubernetesSupportedVersions(cmd); + assertNotNull(response.getResponses().get(0).getIsoUrl()); + } + } + + @Test + public void listKubernetesSupportedVersionsTestWhenOtherUser() { + CallContext callContextMock = Mockito.mock(CallContext.class); + try (MockedStatic callContextMockedStatic = Mockito.mockStatic(CallContext.class)) { + callContextMockedStatic.when(CallContext::current).thenReturn(callContextMock); + ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class); + List versionVOs = new ArrayList<>(); + KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class); + when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION); + versionVOs.add(versionVO); + when(callContextMock.getCallingAccount()).thenReturn(accountMock); + when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO); + 
when(kubernetesSupportedVersionDao.searchAndCount(Mockito.any(), Mockito.any(Filter.class))) + .thenReturn(new Pair<>(versionVOs, versionVOs.size())); + when(accountManager.isRootAdmin(anyLong())).thenReturn(false); + when(accountMock.getId()).thenReturn(2L); + ListResponse response = kubernetesVersionService.listKubernetesSupportedVersions(cmd); + assertNull(response.getResponses().get(0).getIsoUrl()); + } } @Test(expected = InvalidParameterValueException.class) @@ -224,7 +277,6 @@ public class KubernetesVersionServiceTest { mockedComponentContext.when(() -> ComponentContext.inject(Mockito.any(RegisterIsoCmd.class))).thenReturn( new RegisterIsoCmd()); mockedCallContext.when(CallContext::current).thenReturn(callContext); - when(templateService.registerIso(Mockito.any(RegisterIsoCmd.class))).thenReturn( Mockito.mock(VirtualMachineTemplate.class)); VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class); diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 791091e8e2a..aaf499d2f95 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -1254,6 +1254,7 @@ "label.isoname": "Attached ISO", "label.isos": "ISOs", "label.isostate": "ISO state", +"label.isourl": "ISO URL", "label.ispersistent": "Persistent ", "label.ispublic": "Public", "label.isready": "Ready", diff --git a/ui/public/locales/pt_BR.json b/ui/public/locales/pt_BR.json index 4d95b341ab4..1b51bc438e5 100644 --- a/ui/public/locales/pt_BR.json +++ b/ui/public/locales/pt_BR.json @@ -874,6 +874,7 @@ "label.isoname": "Imagem ISO plugada", "label.isos": "ISOs", "label.isostate": "Estado da ISO", +"label.isourl": "URL da ISO", "label.ispersistent": "Persistente", "label.ispublic": "P\u00fablico", "label.isready": "Pronto", diff --git a/ui/src/config/section/image.js b/ui/src/config/section/image.js index 46dec2e1b24..3f8286c5fb1 100644 --- a/ui/src/config/section/image.js +++ b/ui/src/config/section/image.js @@ -60,9 +60,9 @@ export default { details: () => { var fields = ['name', 'id', 'displaytext', 'checksum', 'hypervisor', 'arch', 'format', 'ostypename', 'size', 'physicalsize', 'isready', 'passwordenabled', 'crossZones', 'templatetype', 'directdownload', 'deployasis', 'ispublic', 'isfeatured', 'isextractable', 'isdynamicallyscalable', 'crosszones', 'type', - 'account', 'domain', 'created', 'userdatadetails', 'userdatapolicy'] + 'account', 'domain', 'created', 'userdatadetails', 'userdatapolicy', 'url'] if (['Admin'].includes(store.getters.userInfo.roletype)) { - fields.push('templatetag', 'templatetype', 'url') + fields.push('templatetag', 'templatetype') } return fields }, @@ -372,7 +372,7 @@ export default { permission: ['listKubernetesSupportedVersions'], searchFilters: ['zoneid', 'minimumsemanticversion', 'arch'], columns: ['name', 'state', 'semanticversion', 'isostate', 'mincpunumber', 'minmemory', 'arch', 'zonename'], - details: ['name', 'semanticversion', 'supportsautoscaling', 'zoneid', 'zonename', 'isoid', 'isoname', 'isostate', 'arch', 'mincpunumber', 'minmemory', 'supportsha', 'state', 'created'], + details: ['name', 'semanticversion', 'supportsautoscaling', 'zoneid', 'zonename', 'isoid', 'isoname', 'isostate', 'arch', 'mincpunumber', 'minmemory', 'supportsha', 'state', 'created', 'isourl'], tabs: [ { name: 'details', From 03d24ff851d491dbc0145014b4bd967371127c50 Mon Sep 17 00:00:00 2001 From: Henrique Sato Date: Tue, 20 Jan 2026 04:12:16 -0300 Subject: [PATCH 004/126] Fix NPE on primary storage delete (#11817) --- .../main/java/com/cloud/storage/StorageManagerImpl.java | 8 ++++++-- 1 
file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 8b5e0b24f48..8392c85527d 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -1558,14 +1558,18 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C protected String getStoragePoolNonDestroyedVolumesLog(long storagePoolId) { StringBuilder sb = new StringBuilder(); - List nonDestroyedVols = volumeDao.findByPoolId(storagePoolId, null).stream().filter(vol -> vol.getState() != Volume.State.Destroy).collect(Collectors.toList()); + List nonDestroyedVols = volumeDao.findByPoolId(storagePoolId, null); VMInstanceVO volInstance; List logMessageInfo = new ArrayList<>(); sb.append("["); for (VolumeVO vol : nonDestroyedVols) { volInstance = _vmInstanceDao.findById(vol.getInstanceId()); - logMessageInfo.add(String.format("Volume [%s] (attached to VM [%s])", vol.getUuid(), volInstance.getUuid())); + if (volInstance != null) { + logMessageInfo.add(String.format("Volume [%s] (attached to VM [%s])", vol.getUuid(), volInstance.getUuid())); + } else { + logMessageInfo.add(String.format("Volume [%s]", vol.getUuid())); + } } sb.append(String.join(", ", logMessageInfo)); sb.append("]"); From da518e903621187e4cb75af9f550b7c6d57666f9 Mon Sep 17 00:00:00 2001 From: Daman Arora <61474540+Damans227@users.noreply.github.com> Date: Tue, 20 Jan 2026 02:13:15 -0500 Subject: [PATCH 005/126] CKS: Add image store validation for Kubernetes version registration (#12418) Co-authored-by: Daman Arora --- .../version/KubernetesVersionManagerImpl.java | 33 +++++++++ .../KubernetesVersionManagerImplTest.java | 72 +++++++++++++++++++ .../version/KubernetesVersionServiceTest.java | 9 +++ 3 files changed, 114 insertions(+) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 7b126b2fba0..8363f6f87e3 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -33,6 +33,9 @@ import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesS import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import com.cloud.api.query.dao.TemplateJoinDao; @@ -81,6 +84,8 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne @Inject private DataCenterDao dataCenterDao; @Inject + private ImageStoreDao imageStoreDao; + @Inject private TemplateApiService templateService; public static final String MINIMUN_AUTOSCALER_SUPPORTED_VERSION = "1.15.0"; @@ -323,6 +328,32 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne return createKubernetesSupportedVersionListResponse(versions, versionsAndCount.second()); } + private void 
validateImageStoreForZone(Long zoneId, boolean directDownload) { + if (directDownload) { + return; + } + if (zoneId != null) { + List imageStores = imageStoreDao.listStoresByZoneId(zoneId); + if (CollectionUtils.isEmpty(imageStores)) { + DataCenterVO zone = dataCenterDao.findById(zoneId); + String zoneName = zone != null ? zone.getName() : String.valueOf(zoneId); + throw new InvalidParameterValueException(String.format("Unable to register Kubernetes version ISO. No image store available in zone: %s", zoneName)); + } + } else { + List zones = dataCenterDao.listAllZones(); + List zonesWithoutStorage = new ArrayList<>(); + for (DataCenterVO zone : zones) { + List imageStores = imageStoreDao.listStoresByZoneId(zone.getId()); + if (CollectionUtils.isEmpty(imageStores)) { + zonesWithoutStorage.add(zone.getName()); + } + } + if (!zonesWithoutStorage.isEmpty()) { + throw new InvalidParameterValueException(String.format("Unable to register Kubernetes version ISO for all zones. The following zones have no image store: %s", String.join(", ", zonesWithoutStorage))); + } + } + } + @Override @ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_ADD, eventDescription = "Adding Kubernetes supported version") @@ -368,6 +399,8 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne } } + validateImageStoreForZone(zoneId, isDirectDownload); + VMTemplateVO template = null; try { VirtualMachineTemplate vmTemplate = registerKubernetesVersionIso(zoneId, name, isoUrl, isoChecksum, isDirectDownload, arch); diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionManagerImplTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionManagerImplTest.java index bbec555e8e5..35f8e66e045 100644 --- a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionManagerImplTest.java +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionManagerImplTest.java @@ -16,10 +16,15 @@ // under the License. 
package com.cloud.kubernetes.version; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; import java.util.UUID; import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -32,6 +37,9 @@ import org.springframework.test.util.ReflectionTestUtils; import com.cloud.api.query.dao.TemplateJoinDao; import com.cloud.api.query.vo.TemplateJoinVO; import com.cloud.cpu.CPU; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.exception.InvalidParameterValueException; @RunWith(MockitoJUnitRunner.class) public class KubernetesVersionManagerImplTest { @@ -39,6 +47,12 @@ public class KubernetesVersionManagerImplTest { @Mock TemplateJoinDao templateJoinDao; + @Mock + ImageStoreDao imageStoreDao; + + @Mock + DataCenterDao dataCenterDao; + @InjectMocks KubernetesVersionManagerImpl kubernetesVersionManager = new KubernetesVersionManagerImpl(); @@ -72,4 +86,62 @@ public class KubernetesVersionManagerImplTest { response, true); Assert.assertEquals(state.toString(), ReflectionTestUtils.getField(response, "isoState")); } + + @Test + public void testValidateImageStoreForZoneWithDirectDownload() { + ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", 1L, true); + } + + @Test + public void testValidateImageStoreForZoneWithValidZone() { + Long zoneId = 1L; + List imageStores = Collections.singletonList(Mockito.mock(ImageStoreVO.class)); + Mockito.when(imageStoreDao.listStoresByZoneId(zoneId)).thenReturn(imageStores); + + ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", zoneId, false); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidateImageStoreForZoneWithNoImageStore() { + Long zoneId = 1L; + DataCenterVO zone = Mockito.mock(DataCenterVO.class); + Mockito.when(zone.getName()).thenReturn("test-zone"); + Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(zone); + Mockito.when(imageStoreDao.listStoresByZoneId(zoneId)).thenReturn(Collections.emptyList()); + + ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", zoneId, false); + } + + @Test + public void testValidateImageStoreForAllZonesWithAllValid() { + DataCenterVO zone1 = Mockito.mock(DataCenterVO.class); + Mockito.when(zone1.getId()).thenReturn(1L); + DataCenterVO zone2 = Mockito.mock(DataCenterVO.class); + Mockito.when(zone2.getId()).thenReturn(2L); + List zones = Arrays.asList(zone1, zone2); + Mockito.when(dataCenterDao.listAllZones()).thenReturn(zones); + + List imageStores = Collections.singletonList(Mockito.mock(ImageStoreVO.class)); + Mockito.when(imageStoreDao.listStoresByZoneId(1L)).thenReturn(imageStores); + Mockito.when(imageStoreDao.listStoresByZoneId(2L)).thenReturn(imageStores); + + ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", (Long) null, false); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidateImageStoreForAllZonesWithSomeMissingStorage() { + DataCenterVO zone1 = Mockito.mock(DataCenterVO.class); + Mockito.when(zone1.getId()).thenReturn(1L); + DataCenterVO zone2 = Mockito.mock(DataCenterVO.class); + 
Mockito.when(zone2.getId()).thenReturn(2L); + Mockito.when(zone2.getName()).thenReturn("zone-without-storage"); + List zones = Arrays.asList(zone1, zone2); + Mockito.when(dataCenterDao.listAllZones()).thenReturn(zones); + + List imageStores = Collections.singletonList(Mockito.mock(ImageStoreVO.class)); + Mockito.when(imageStoreDao.listStoresByZoneId(1L)).thenReturn(imageStores); + Mockito.when(imageStoreDao.listStoresByZoneId(2L)).thenReturn(Collections.emptyList()); + + ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", (Long) null, false); + } } diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java index b874a9a0ffa..7ba35169b9e 100644 --- a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/version/KubernetesVersionServiceTest.java @@ -78,6 +78,9 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; + @RunWith(MockitoJUnitRunner.class) public class KubernetesVersionServiceTest { @@ -97,6 +100,8 @@ public class KubernetesVersionServiceTest { @Mock private DataCenterDao dataCenterDao; @Mock + private ImageStoreDao imageStoreDao; + @Mock private TemplateApiService templateService; @Mock private Account accountMock; @@ -128,6 +133,10 @@ public class KubernetesVersionServiceTest { DataCenterVO zone = Mockito.mock(DataCenterVO.class); when(dataCenterDao.findById(Mockito.anyLong())).thenReturn(zone); + List imageStores = new ArrayList<>(); + imageStores.add(Mockito.mock(ImageStoreVO.class)); + when(imageStoreDao.listStoresByZoneId(Mockito.anyLong())).thenReturn(imageStores); + TemplateJoinVO templateJoinVO = Mockito.mock(TemplateJoinVO.class); when(templateJoinVO.getUrl()).thenReturn("https://download.cloudstack.com"); when(templateJoinVO.getState()).thenReturn(ObjectInDataStoreStateMachine.State.Ready); From cf36fb00008b3f7a71e155d0e408daa907fa04e3 Mon Sep 17 00:00:00 2001 From: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> Date: Tue, 20 Jan 2026 12:55:16 +0530 Subject: [PATCH 006/126] Set nfsVersion in ssvm agent.properties only if it is not null (#12445) --- .../secondarystorage/SecondaryStorageManagerImpl.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index 5698632249d..c9bcb911000 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -1224,8 +1224,10 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar if (dc.getDns2() != null) { buf.append(" dns2=").append(dc.getDns2()); } - String nfsVersion = imageStoreDetailsUtil != null ? 
imageStoreDetailsUtil.getNfsVersion(secStores.get(0).getId()) : null; - buf.append(" nfsVersion=").append(nfsVersion); + String nfsVersion = imageStoreDetailsUtil.getNfsVersion(secStores.get(0).getId()); + if (StringUtils.isNotBlank(nfsVersion)) { + buf.append(" nfsVersion=").append(nfsVersion); + } buf.append(" keystore_password=").append(VirtualMachineGuru.getEncodedString(PasswordGenerator.generateRandomPassword(16))); String bootArgs = buf.toString(); if (logger.isDebugEnabled()) { From 496bc0329cdb6fcb6cbefb463d37ba1599192176 Mon Sep 17 00:00:00 2001 From: Nicolas Vazquez Date: Tue, 20 Jan 2026 04:56:32 -0300 Subject: [PATCH 007/126] Fix: Condition for aborting migration, resume paused VMs on destination (#12331) --- .../wrapper/LibvirtMigrateCommandWrapper.java | 46 ++++++++++++++----- 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java index 1f14402c85e..fe18a88fe1f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java @@ -243,20 +243,21 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper migrateThread = executor.submit(worker); executor.shutdown(); long sleeptime = 0; + final int migrateDowntime = libvirtComputingResource.getMigrateDowntime(); + boolean isMigrateDowntimeSet = false; + while (!executor.isTerminated()) { Thread.sleep(100); sleeptime += 100; - if (sleeptime == 1000) { // wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state - final int migrateDowntime = libvirtComputingResource.getMigrateDowntime(); - if (migrateDowntime > 0 ) { - try { - final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime); - if (setDowntime == 0 ) { - logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms"); - } - } catch (final LibvirtException e) { - logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage()); + if (!isMigrateDowntimeSet && migrateDowntime > 0 && sleeptime >= 1000) { // wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state + try { + final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime); + if (setDowntime == 0 ) { + isMigrateDowntimeSet = true; + logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms"); } + } catch (final LibvirtException e) { + logger.debug("Failed to set max downtime for migration, perhaps migration completed? 
Error: " + e.getMessage()); } } if (sleeptime % 1000 == 0) { @@ -272,7 +273,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper Date: Tue, 20 Jan 2026 13:38:39 +0530 Subject: [PATCH 008/126] Storage pool monitor disconnect improvements (#12398) --- .../storage/listener/StoragePoolMonitor.java | 28 +++++++++++++++---- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index a0e10c646b5..6df3cbaeedf 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -21,6 +21,7 @@ import java.util.List; import javax.inject.Inject; import com.cloud.storage.StorageManager; +import com.cloud.utils.Profiler; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; @@ -144,12 +145,13 @@ public class StoragePoolMonitor implements Listener { } @Override - public synchronized boolean processDisconnect(long agentId, Status state) { + public boolean processDisconnect(long agentId, Status state) { return processDisconnect(agentId, null, null, state); } @Override - public synchronized boolean processDisconnect(long agentId, String uuid, String name, Status state) { + public boolean processDisconnect(long agentId, String uuid, String name, Status state) { + logger.debug("Starting disconnect for Agent [id: {}, uuid: {}, name: {}]", agentId, uuid, name); Host host = _storageManager.getHost(agentId); if (host == null) { logger.warn("Agent [id: {}, uuid: {}, name: {}] not found, not disconnecting pools", agentId, uuid, name); @@ -157,38 +159,52 @@ public class StoragePoolMonitor implements Listener { } if (host.getType() != Host.Type.Routing) { + logger.debug("Host [id: {}, uuid: {}, name: {}] is not of type {}, skip", agentId, uuid, name, Host.Type.Routing); return false; } + logger.debug("Looking for connected Storage Pools for Host [id: {}, uuid: {}, name: {}]", agentId, uuid, name); List storagePoolHosts = _storageManager.findStoragePoolsConnectedToHost(host.getId()); if (storagePoolHosts == null) { - if (logger.isTraceEnabled()) { - logger.trace("No pools to disconnect for host: {}", host); - } + logger.debug("No pools to disconnect for host: {}", host); return true; } + logger.debug("Found {} pools to disconnect for host: {}", storagePoolHosts.size(), host); boolean disconnectResult = true; - for (StoragePoolHostVO storagePoolHost : storagePoolHosts) { + int storagePoolHostsSize = storagePoolHosts.size(); + for (int i = 0; i < storagePoolHostsSize; i++) { + StoragePoolHostVO storagePoolHost = storagePoolHosts.get(i); + logger.debug("Processing disconnect from Storage Pool {} ({} of {}) for host: {}", storagePoolHost.getPoolId(), i, storagePoolHostsSize, host); StoragePoolVO pool = _poolDao.findById(storagePoolHost.getPoolId()); if (pool == null) { + logger.debug("No Storage Pool found with id {} ({} of {}) for host: {}", storagePoolHost.getPoolId(), i, storagePoolHostsSize, host); continue; } if (!pool.isShared()) { + logger.debug("Storage Pool {} ({}) ({} of {}) is not shared for host: {}, ignore disconnect", pool.getName(), pool.getUuid(), i, storagePoolHostsSize, host); continue; } // Handle only PowerFlex pool for now, not to impact other 
pools behavior if (pool.getPoolType() != StoragePoolType.PowerFlex) { + logger.debug("Storage Pool {} ({}) ({} of {}) is not of type {} for host: {}, ignore disconnect", pool.getName(), pool.getUuid(), i, storagePoolHostsSize, pool.getPoolType(), host); continue; } + logger.debug("Sending disconnect to Storage Pool {} ({}) ({} of {}) for host: {}", pool.getName(), pool.getUuid(), i, storagePoolHostsSize, host); + Profiler disconnectProfiler = new Profiler(); try { + disconnectProfiler.start(); _storageManager.disconnectHostFromSharedPool(host, pool); } catch (Exception e) { logger.error("Unable to disconnect host {} from storage pool {} due to {}", host, pool, e.toString()); disconnectResult = false; + } finally { + disconnectProfiler.stop(); + long disconnectDuration = disconnectProfiler.getDurationInMillis() / 1000; + logger.debug("Finished disconnect with result {} from Storage Pool {} ({}) ({} of {}) for host: {}, duration: {} secs", disconnectResult, pool.getName(), pool.getUuid(), i, storagePoolHostsSize, host, disconnectDuration); } } From 036489b288cef2ccd10f1f83e21a7ff34b6b6a03 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Wed, 21 Jan 2026 09:59:21 +0100 Subject: [PATCH 009/126] CKS: fix resource limitation check on cpu when scale cks cluster (#12379) --- .../cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index e6ed850fba5..f6af2b95978 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -1383,8 +1383,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne } totalAdditionalVms += additional; - long effectiveCpu = (long) so.getCpu() * so.getSpeed(); - totalAdditionalCpuUnits += effectiveCpu * additional; + totalAdditionalCpuUnits += so.getCpu() * additional; totalAdditionalRamMb += so.getRamSize() * additional; try { From 6e5d78a8a78feec31a6dfa893137b65fed5f7677 Mon Sep 17 00:00:00 2001 From: Harikrishna Date: Thu, 22 Jan 2026 12:46:16 +0530 Subject: [PATCH 010/126] Fix NPE on adding new columns in the tables (#12464) * Fix NPE on adding new columns in the tables * Remove assert --- .../java/com/cloud/utils/db/GenericDaoBase.java | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index 301803aab9b..c3a4d2c2487 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@ -89,6 +89,7 @@ import net.sf.cglib.proxy.NoOp; import net.sf.ehcache.Cache; import net.sf.ehcache.CacheManager; import net.sf.ehcache.Element; +import org.springframework.util.ClassUtils; /** * GenericDaoBase is a simple way to implement DAOs. 
It DOES NOT @@ -2047,16 +2048,22 @@ public abstract class GenericDaoBase extends Compone @DB() protected void setField(final Object entity, final ResultSet rs, ResultSetMetaData meta, final int index) throws SQLException { - Attribute attr = _allColumns.get(new Pair(meta.getTableName(index), meta.getColumnName(index))); + String tableName = meta.getTableName(index); + String columnName = meta.getColumnName(index); + Attribute attr = _allColumns.get(new Pair<>(tableName, columnName)); if (attr == null) { // work around for mysql bug to return original table name instead of view name in db view case Table tbl = entity.getClass().getSuperclass().getAnnotation(Table.class); if (tbl != null) { - attr = _allColumns.get(new Pair(tbl.name(), meta.getColumnLabel(index))); + attr = _allColumns.get(new Pair<>(tbl.name(), meta.getColumnLabel(index))); } } - assert (attr != null) : "How come I can't find " + meta.getCatalogName(index) + "." + meta.getColumnName(index); - setField(entity, attr.field, rs, index); + if(attr == null) { + logger.warn(String.format("Failed to find attribute in the entity %s to map column %s.%s (%s)", + ClassUtils.getUserClass(entity).getSimpleName(), tableName, columnName)); + } else { + setField(entity, attr.field, rs, index); + } } @Override From b5e9178078f0efac75fdb3eb5b07459228da471d Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Thu, 22 Jan 2026 10:56:03 +0100 Subject: [PATCH 011/126] UI: fix issues when deploy VNF applicance on network with SG (#12436) --- ui/public/locales/en.json | 2 +- ui/src/config/section/network.js | 5 ++++- ui/src/views/compute/DeployVnfAppliance.vue | 2 +- ui/src/views/network/VnfAppliancesTab.vue | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index aaf499d2f95..64437a4d07c 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -2527,7 +2527,7 @@ "label.vnf.app.action.reinstall": "Reinstall VNF Appliance", "label.vnf.cidr.list": "CIDR from which access to the VNF appliance's Management interface should be allowed from", "label.vnf.cidr.list.tooltip": "the CIDR list to forward traffic from to the VNF management interface. Multiple entries must be separated by a single comma character (,). The default value is 0.0.0.0/0.", -"label.vnf.configure.management": "Configure Firewall and Port Forwarding rules for VNF's management interfaces", +"label.vnf.configure.management": "Configure network rules for VNF's management interfaces", "label.vnf.configure.management.tooltip": "True by default, security group or network rules (source nat and firewall rules) will be configured for VNF management interfaces. False otherwise. 
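A note on the setField() change in PATCH 010/126 above: as it appears in this excerpt, the added logger.warn passes three arguments to a format string containing four %s specifiers, which String.format rejects at runtime. A minimal, self-contained illustration of the matching rule follows; the entity, table and column values are made up.

public class FormatArgsDemo {
    public static void main(String[] args) {
        String entity = "SomeVO", table = "some_table", column = "new_column";
        // Three %s specifiers with three arguments format cleanly.
        System.out.println(String.format(
                "Failed to find attribute in the entity %s to map column %s.%s", entity, table, column));
        // A fourth specifier with only three arguments fails at runtime.
        try {
            String.format("Failed to find attribute in the entity %s to map column %s.%s (%s)", entity, table, column);
        } catch (java.util.MissingFormatArgumentException e) {
            System.out.println("Format/argument mismatch: " + e.getMessage());
        }
    }
}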
Learn what rules are configured at http://docs.cloudstack.apache.org/en/latest/adminguide/networking/vnf_templates_appliances.html#deploying-vnf-appliances", "label.vnf.detail.add": "Add VNF detail", "label.vnf.detail.remove": "Remove VNF detail", diff --git a/ui/src/config/section/network.js b/ui/src/config/section/network.js index 30aae3a8deb..fbc044ff500 100644 --- a/ui/src/config/section/network.js +++ b/ui/src/config/section/network.js @@ -356,7 +356,10 @@ export default { permission: ['listVnfAppliances'], resourceType: 'UserVm', params: () => { - return { details: 'servoff,tmpl,nics', isvnf: true } + return { + details: 'group,nics,secgrp,tmpl,servoff,diskoff,iso,volume,affgrp,backoff,vnfnics', + isvnf: true + } }, columns: () => { const fields = ['name', 'state', 'ipaddress'] diff --git a/ui/src/views/compute/DeployVnfAppliance.vue b/ui/src/views/compute/DeployVnfAppliance.vue index 1117413d710..fec1139ab9b 100644 --- a/ui/src/views/compute/DeployVnfAppliance.vue +++ b/ui/src/views/compute/DeployVnfAppliance.vue @@ -1305,7 +1305,7 @@ export default { for (const deviceId of managementDeviceIds) { if (this.vnfNicNetworks && this.vnfNicNetworks[deviceId] && ((this.vnfNicNetworks[deviceId].type === 'Isolated' && this.vnfNicNetworks[deviceId].vpcid === undefined) || - (this.vnfNicNetworks[deviceId].type === 'Shared' && this.zone.securitygroupsenabled))) { + (this.vnfNicNetworks[deviceId].type === 'Shared' && this.vnfNicNetworks[deviceId].service.filter(svc => svc.name === 'SecurityGroupProvider')))) { return true } } diff --git a/ui/src/views/network/VnfAppliancesTab.vue b/ui/src/views/network/VnfAppliancesTab.vue index 0db85323d15..139516187c4 100644 --- a/ui/src/views/network/VnfAppliancesTab.vue +++ b/ui/src/views/network/VnfAppliancesTab.vue @@ -120,7 +120,7 @@ export default { methods: { fetchData () { var params = { - details: 'servoff,tmpl,nics', + details: 'group,nics,secgrp,tmpl,servoff,diskoff,iso,volume,affgrp,backoff,vnfnics', isVnf: true, listAll: true } From cd5bb09d0d19e4e01baeaad7a6cbe14ab2db28bc Mon Sep 17 00:00:00 2001 From: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> Date: Thu, 22 Jan 2026 15:29:41 +0530 Subject: [PATCH 012/126] Fix potential leaks in executePipedCommands (#12478) --- .../java/com/cloud/utils/script/Script.java | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/utils/src/main/java/com/cloud/utils/script/Script.java b/utils/src/main/java/com/cloud/utils/script/Script.java index 6c62c910648..ffda782edda 100644 --- a/utils/src/main/java/com/cloud/utils/script/Script.java +++ b/utils/src/main/java/com/cloud/utils/script/Script.java @@ -40,9 +40,11 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import org.apache.cloudstack.utils.security.KeyStoreUtils; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.IOUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -708,13 +710,31 @@ public class Script implements Callable { return executeCommandForExitValue(0, command); } + private static void cleanupProcesses(AtomicReference> processesRef) { + List processes = processesRef.get(); + if (CollectionUtils.isNotEmpty(processes)) { + for (Process process : processes) { + if (process == null) { + continue; + } + 
LOGGER.trace(String.format("Cleaning up process [%s] from piped commands.", process.pid())); + IOUtils.closeQuietly(process.getErrorStream()); + IOUtils.closeQuietly(process.getOutputStream()); + IOUtils.closeQuietly(process.getInputStream()); + process.destroyForcibly(); + } + } + } + public static Pair executePipedCommands(List commands, long timeout) { if (timeout <= 0) { timeout = DEFAULT_TIMEOUT; } + final AtomicReference> processesRef = new AtomicReference<>(); Callable> commandRunner = () -> { List builders = commands.stream().map(ProcessBuilder::new).collect(Collectors.toList()); List processes = ProcessBuilder.startPipeline(builders); + processesRef.set(processes); Process last = processes.get(processes.size()-1); try (BufferedReader reader = new BufferedReader(new InputStreamReader(last.getInputStream()))) { String line; @@ -741,6 +761,8 @@ public class Script implements Callable { result.second(ERR_TIMEOUT); } catch (InterruptedException | ExecutionException e) { LOGGER.error("Error executing piped commands", e); + } finally { + cleanupProcesses(processesRef); } return result; } From d1eb2822d9d5b346840851cf21611345454ed734 Mon Sep 17 00:00:00 2001 From: Vishesh <8760112+vishesh92@users.noreply.github.com> Date: Thu, 22 Jan 2026 18:59:35 +0530 Subject: [PATCH 013/126] Remove redundant Exceptions from logs for vm schedules (#12428) --- .../vm/schedule/dao/VMScheduledJobDao.java | 2 ++ .../vm/schedule/dao/VMScheduledJobDaoImpl.java | 15 +++++++++++++++ .../cloudstack/vm/schedule/VMSchedulerImpl.java | 8 +++++++- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/dao/VMScheduledJobDao.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/dao/VMScheduledJobDao.java index 7b8c01aae6a..835ac696f26 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/dao/VMScheduledJobDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/dao/VMScheduledJobDao.java @@ -31,4 +31,6 @@ public interface VMScheduledJobDao extends GenericDao { int expungeJobsForSchedules(List scheduleId, Date dateAfter); int expungeJobsBefore(Date currentTimestamp); + + VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/dao/VMScheduledJobDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/dao/VMScheduledJobDaoImpl.java index 50a2b12fd77..2f08a41b92e 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/dao/VMScheduledJobDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/vm/schedule/dao/VMScheduledJobDaoImpl.java @@ -39,6 +39,8 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase expungeJobForScheduleSearch; + private final SearchBuilder scheduleAndTimestampSearch; + static final String SCHEDULED_TIMESTAMP = "scheduled_timestamp"; static final String VM_SCHEDULE_ID = "vm_schedule_id"; @@ -58,6 +60,11 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase sc = scheduleAndTimestampSearch.create(); + sc.setParameters(VM_SCHEDULE_ID, scheduleId); + sc.setParameters(SCHEDULED_TIMESTAMP, scheduledTimestamp); + return findOneBy(sc); + } } diff --git a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java index 7410fb1c265..56d794fa5c2 100644 --- 
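Alongside the executePipedCommands() hardening in PATCH 012/126 above, here is a minimal standalone sketch of ProcessBuilder.startPipeline() with forced cleanup of every child process. The echo and tr commands are illustrative only and assume a Unix-like host; the class is not part of the patch.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.List;

public class PipedCommandsDemo {
    public static void main(String[] args) throws Exception {
        List<ProcessBuilder> builders = List.of(
                new ProcessBuilder("echo", "hello piped world"),
                new ProcessBuilder("tr", "a-z", "A-Z"));
        // Start the whole pipeline: stdout of each process feeds stdin of the next.
        List<Process> processes = ProcessBuilder.startPipeline(builders);
        Process last = processes.get(processes.size() - 1);
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(last.getInputStream()))) {
            reader.lines().forEach(System.out::println);
        } finally {
            // Same intent as the cleanup added to Script: never leave child processes behind on timeout or error.
            for (Process p : processes) {
                p.destroyForcibly();
            }
        }
    }
}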
a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java @@ -162,7 +162,13 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu } Date scheduledDateTime = Date.from(ts.toInstant()); - VMScheduledJobVO scheduledJob = new VMScheduledJobVO(vmSchedule.getVmId(), vmSchedule.getId(), vmSchedule.getAction(), scheduledDateTime); + VMScheduledJobVO scheduledJob = vmScheduledJobDao.findByScheduleAndTimestamp(vmSchedule.getId(), scheduledDateTime); + if (scheduledJob != null) { + logger.trace("Job is already scheduled for schedule {} at {}", vmSchedule, scheduledDateTime); + return scheduledDateTime; + } + + scheduledJob = new VMScheduledJobVO(vmSchedule.getVmId(), vmSchedule.getId(), vmSchedule.getAction(), scheduledDateTime); try { vmScheduledJobDao.persist(scheduledJob); ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, vm.getAccountId(), actionEventMap.get(vmSchedule.getAction()), From 6846619a6f1f27bb8fe67be8161e4ca839c6c4fc Mon Sep 17 00:00:00 2001 From: Nicolas Vazquez Date: Thu, 22 Jan 2026 10:32:46 -0300 Subject: [PATCH 014/126] Fix update network offering domainids size limitation (#12431) --- .../api/command/admin/network/UpdateNetworkOfferingCmd.java | 1 + 1 file changed, 1 insertion(+) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java index 9af10262b2d..8910966ba2e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java @@ -78,6 +78,7 @@ public class UpdateNetworkOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.STRING, + length = 4096, description = "The ID of the containing domain(s) as comma separated string, public for public offerings") private String domainIds; From 6a9835904cb35983ab88e539fe0a5b4c8ce9931b Mon Sep 17 00:00:00 2001 From: Nicolas Vazquez Date: Thu, 22 Jan 2026 10:57:46 -0300 Subject: [PATCH 015/126] Fix for zoneids parameters length on updateAPIs (#12440) --- .../api/command/admin/offering/UpdateDiskOfferingCmd.java | 1 + .../api/command/admin/offering/UpdateServiceOfferingCmd.java | 1 + .../cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java | 1 + 3 files changed, 3 insertions(+) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java index 2f07f85f983..c93b5d41a1c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java @@ -75,6 +75,7 @@ public class UpdateDiskOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.STRING, description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings", + length = 4096, since = "4.13") private String zoneIds; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java index 0dc97659b9d..26c7d87ab45 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java @@ -69,6 +69,7 @@ public class UpdateServiceOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.STRING, description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings", + length = 4096, since = "4.13") private String zoneIds; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java index b8a8077b30b..44bc88c8daf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java @@ -65,6 +65,7 @@ public class UpdateVPCOfferingCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.STRING, description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings", + length = 4096, since = "4.13") private String zoneIds; From bce3e54a7e46216917acfe2f1ba1e2a9c9b12128 Mon Sep 17 00:00:00 2001 From: Daman Arora <61474540+Damans227@users.noreply.github.com> Date: Thu, 22 Jan 2026 09:02:46 -0500 Subject: [PATCH 016/126] improve error handling for template upload notifications (#12412) Co-authored-by: Daman Arora --- ui/src/utils/plugins.js | 13 +++++++------ ui/src/views/image/RegisterOrUploadTemplate.vue | 6 +----- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/ui/src/utils/plugins.js b/ui/src/utils/plugins.js index a07f8178604..0ec957c8729 100644 --- a/ui/src/utils/plugins.js +++ b/ui/src/utils/plugins.js @@ -218,18 +218,19 @@ export const notifierPlugin = { if (error.response.status) { msg = `${i18n.global.t('message.request.failed')} (${error.response.status})` } - if (error.message) { - desc = error.message - } - if (error.response.headers && 'x-description' in error.response.headers) { + if (error.response.headers?.['x-description']) { desc = error.response.headers['x-description'] - } - if (desc === '' && error.response.data) { + } else if (error.response.data) { const responseKey = _.findKey(error.response.data, 'errortext') if (responseKey) { desc = error.response.data[responseKey].errortext + } else if (typeof error.response.data === 'string') { + desc = error.response.data } } + if (!desc && error.message) { + desc = error.message + } } let countNotify = store.getters.countNotify countNotify++ diff --git a/ui/src/views/image/RegisterOrUploadTemplate.vue b/ui/src/views/image/RegisterOrUploadTemplate.vue index 76df7b246aa..3ada9f6fd53 100644 --- a/ui/src/views/image/RegisterOrUploadTemplate.vue +++ b/ui/src/views/image/RegisterOrUploadTemplate.vue @@ -638,11 +638,7 @@ export default { this.$emit('refresh-data') this.closeAction() }).catch(e => { - this.$notification.error({ - message: this.$t('message.upload.failed'), - description: `${this.$t('message.upload.template.failed.description')} - ${e}`, - duration: 0 - }) + this.$notifyError(e) }) }, fetchCustomHypervisorName () { From 8db065a14eb41bab0fb3420e66ee96722f1ed6ad Mon Sep 17 00:00:00 2001 From: Manoj Kumar Date: Fri, 23 Jan 2026 21:04:52 +0530 Subject: [PATCH 017/126] limit iso filename to have 251 chars at max (#12430) --- .../api/BaseUpdateTemplateOrIsoCmd.java | 2 +- .../api/command/user/iso/RegisterIsoCmd.java | 2 +- 
.../cloud/upgrade/DatabaseUpgradeChecker.java | 2 + .../upgrade/dao/Upgrade42020to42030.java | 64 +++++++++++++++++++ .../META-INF/db/schema-42020to42030.sql | 22 +++++++ 5 files changed, 90 insertions(+), 2 deletions(-) create mode 100644 engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42020to42030.java create mode 100644 engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java index 38cf765dd1a..696a500860e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java @@ -42,7 +42,7 @@ public abstract class BaseUpdateTemplateOrIsoCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = TemplateResponse.class, required = true, description = "The ID of the image file") private Long id; - @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "The name of the image file") + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, length = 251, description = "The name of the image file") private String templateName; @Parameter(name = ApiConstants.OS_TYPE_ID, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java index f499c01ce58..2de0f96f271 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java @@ -70,7 +70,7 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd { @Parameter(name = ApiConstants.IS_EXTRACTABLE, type = CommandType.BOOLEAN, description = "True if the ISO or its derivatives are extractable; default is false") private Boolean extractable; - @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "The name of the ISO") + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, length = 251, description = "The name of the ISO") private String isoName; @Parameter(name = ApiConstants.OS_TYPE_ID, diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index afb7a8d69e6..a8a166fbf27 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -33,6 +33,7 @@ import java.util.List; import javax.inject.Inject; +import com.cloud.upgrade.dao.Upgrade42020to42030; import com.cloud.utils.FileUtil; import org.apache.cloudstack.utils.CloudStackVersion; import org.apache.commons.lang3.StringUtils; @@ -236,6 +237,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { .next("4.19.0.0", new Upgrade41900to41910()) .next("4.19.1.0", new Upgrade41910to42000()) .next("4.20.0.0", new Upgrade42000to42010()) + .next("4.20.2.0", new Upgrade42020to42030()) .build(); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42020to42030.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42020to42030.java new file mode 100644 index 00000000000..68100e16401 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42020to42030.java @@ -0,0 +1,64 @@ +// 
Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; +import java.sql.Connection; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade42020to42030 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate { + + @Override + public String[] getUpgradableVersionRange() { + return new String[]{"4.20.2.0", "4.20.3.0"}; + } + + @Override + public String getUpgradedVersion() { + return "4.20.3.0"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-42020to42030.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public InputStream[] getCleanupScripts() { + return null; + } + + @Override + public void updateSystemVmTemplates(Connection conn) { + } +} diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql b/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql new file mode 100644 index 00000000000..598fdb7adc4 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql @@ -0,0 +1,22 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +--; +-- Schema upgrade from 4.20.2.0 to 4.20.3.0 +--; + +ALTER TABLE `cloud`.`template_store_ref` MODIFY COLUMN `download_url` varchar(2048); From c8cadcb56e553bdbbc141365061bd3542f43612e Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Mon, 26 Jan 2026 14:01:14 +0530 Subject: [PATCH 018/126] NPE fix while deleting storage pool when pool has detached volumes (#12451) * NPE fix while deleting storage pool when pool has detached volumes * review * unit tests * Added log for volumes not attached to any VMs * update filter, log and test * updated volume dao method names returning non destroyed volumes * build fix --------- Co-authored-by: dahn --- .../java/com/cloud/storage/dao/VolumeDao.java | 6 +- .../com/cloud/storage/dao/VolumeDaoImpl.java | 6 +- .../datastore/PrimaryDataStoreImpl.java | 2 +- .../cloudstack/sioc/SiocManagerImpl.java | 2 +- .../driver/DateraPrimaryDataStoreDriver.java | 2 +- .../provider/DateraHostListener.java | 4 +- .../SolidFirePrimaryDataStoreDriver.java | 2 +- .../provider/SolidFireHostListener.java | 4 +- .../StorPoolPrimaryDataStoreDriver.java | 2 +- .../cloud/resource/ResourceManagerImpl.java | 4 +- .../java/com/cloud/server/StatsCollector.java | 2 +- .../com/cloud/storage/StorageManagerImpl.java | 16 ++++-- .../storage/StoragePoolAutomationImpl.java | 2 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 2 +- .../resource/ResourceManagerImplTest.java | 12 ++-- .../cloud/storage/StorageManagerImplTest.java | 56 ++++++++++++++++++- 16 files changed, 90 insertions(+), 34 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java index 4936af3caab..83f02719518 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDao.java @@ -48,7 +48,7 @@ public interface VolumeDao extends GenericDao, StateDao findIncludingRemovedByInstanceAndType(long id, Volume.Type vType); - List findByInstanceIdAndPoolId(long instanceId, long poolId); + List findNonDestroyedVolumesByInstanceIdAndPoolId(long instanceId, long poolId); List findByInstanceIdDestroyed(long vmId); @@ -70,11 +70,11 @@ public interface VolumeDao extends GenericDao, StateDao findCreatedByInstance(long id); - List findByPoolId(long poolId); + List findNonDestroyedVolumesByPoolId(long poolId); VolumeVO findByPoolIdName(long poolId, String name); - List findByPoolId(long poolId, Volume.Type volumeType); + List findNonDestroyedVolumesByPoolId(long poolId, Volume.Type volumeType); List findByPoolIdAndState(long poolid, Volume.State state); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index 5ef64b04664..a72b4a25845 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -135,7 +135,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } @Override - public List findByPoolId(long poolId) { + public List findNonDestroyedVolumesByPoolId(long poolId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("poolId", poolId); sc.setParameters("notDestroyed", Volume.State.Destroy, Volume.State.Expunged); @@ -144,7 +144,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } @Override - public List findByInstanceIdAndPoolId(long instanceId, long poolId) { + public List 
findNonDestroyedVolumesByInstanceIdAndPoolId(long instanceId, long poolId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("instanceId", instanceId); sc.setParameters("poolId", poolId); @@ -161,7 +161,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } @Override - public List findByPoolId(long poolId, Volume.Type volumeType) { + public List findNonDestroyedVolumesByPoolId(long poolId, Volume.Type volumeType) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("poolId", poolId); sc.setParameters("notDestroyed", Volume.State.Destroy, Volume.State.Expunged); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index 6a10c26cc0b..d864bf8cd8c 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -126,7 +126,7 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public List getVolumes() { - List volumes = volumeDao.findByPoolId(getId()); + List volumes = volumeDao.findNonDestroyedVolumesByPoolId(getId()); List volumeInfos = new ArrayList(); for (VolumeVO volume : volumes) { volumeInfos.add(VolumeObject.getVolumeObject(this, volume)); diff --git a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java index e93b8df39e9..b01af35725f 100644 --- a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java +++ b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java @@ -123,7 +123,7 @@ public class SiocManagerImpl implements SiocManager { int limitIopsTotal = 0; - List volumes = volumeDao.findByPoolId(storagePoolId, null); + List volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null); if (volumes != null && volumes.size() > 0) { Set instanceIds = new HashSet<>(); diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java index dcf84525748..62393610499 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java @@ -563,7 +563,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { long usedSpaceBytes = 0; - List lstVolumes = _volumeDao.findByPoolId(storagePool.getId(), null); + List lstVolumes = _volumeDao.findNonDestroyedVolumesByPoolId(storagePool.getId(), null); if (lstVolumes != null) { for (VolumeVO volume : lstVolumes) { diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java index a0dc23da486..08bc89737f2 100644 --- 
a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java @@ -247,7 +247,7 @@ public class DateraHostListener implements HypervisorHostListener { List storagePaths = new ArrayList<>(); // If you do not pass in null for the second parameter, you only get back applicable ROOT disks. - List volumes = _volumeDao.findByPoolId(storagePoolId, null); + List volumes = _volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null); if (volumes != null) { for (VolumeVO volume : volumes) { @@ -317,7 +317,7 @@ public class DateraHostListener implements HypervisorHostListener { StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); // If you do not pass in null for the second parameter, you only get back applicable ROOT disks. - List volumes = _volumeDao.findByPoolId(storagePoolId, null); + List volumes = _volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null); if (volumes != null) { for (VolumeVO volume : volumes) { diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index 6cc76d99d9e..1e927e20168 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -433,7 +433,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { public long getUsedIops(StoragePool storagePool) { long usedIops = 0; - List volumes = volumeDao.findByPoolId(storagePool.getId(), null); + List volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePool.getId(), null); if (volumes != null) { for (VolumeVO volume : volumes) { diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java index 052191128f1..c961c926739 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java @@ -199,7 +199,7 @@ public class SolidFireHostListener implements HypervisorHostListener { List storagePaths = new ArrayList<>(); // If you do not pass in null for the second parameter, you only get back applicable ROOT disks. - List volumes = volumeDao.findByPoolId(storagePoolId, null); + List volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null); if (volumes != null) { for (VolumeVO volume : volumes) { @@ -230,7 +230,7 @@ public class SolidFireHostListener implements HypervisorHostListener { StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); // If you do not pass in null for the second parameter, you only get back applicable ROOT disks. 
- List volumes = volumeDao.findByPoolId(storagePoolId, null); + List volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null); if (volumes != null) { for (VolumeVO volume : volumes) { diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java index 6ca67cb5923..619beee3ec6 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java @@ -1276,7 +1276,7 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { return volumeStats; } } else { - List volumes = volumeDao.findByPoolId(storagePool.getId()); + List volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePool.getId()); for (VolumeVO volume : volumes) { if (volume.getPath() != null && volume.getPath().equals(volumeId)) { long size = volume.getSize(); diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index e62e89eb0ef..a77ecfcb7fe 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -1026,8 +1026,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } protected void destroyLocalStoragePoolVolumes(long poolId) { - List rootDisks = volumeDao.findByPoolId(poolId); - List dataVolumes = volumeDao.findByPoolId(poolId, Volume.Type.DATADISK); + List rootDisks = volumeDao.findNonDestroyedVolumesByPoolId(poolId); + List dataVolumes = volumeDao.findNonDestroyedVolumesByPoolId(poolId, Volume.Type.DATADISK); List volumes = new ArrayList<>(); addVolumesToList(volumes, rootDisks); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 7e83d452bb9..1e0138f7cf9 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1646,7 +1646,7 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc List pools = _storagePoolDao.listAll(); for (StoragePoolVO pool : pools) { - List volumes = _volsDao.findByPoolId(pool.getId(), null); + List volumes = _volsDao.findNonDestroyedVolumesByPoolId(pool.getId(), null); for (VolumeVO volume : volumes) { if (!List.of(ImageFormat.QCOW2, ImageFormat.VHD, ImageFormat.OVA, ImageFormat.RAW).contains(volume.getFormat()) && !List.of(Storage.StoragePoolType.PowerFlex, Storage.StoragePoolType.FiberChannel).contains(pool.getPoolType())) { diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 8392c85527d..13b7fbb00c2 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -1558,17 +1558,21 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C protected String getStoragePoolNonDestroyedVolumesLog(long storagePoolId) { StringBuilder sb = new StringBuilder(); - List nonDestroyedVols = volumeDao.findByPoolId(storagePoolId, null); + List nonDestroyedVols = 
volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null); VMInstanceVO volInstance; List logMessageInfo = new ArrayList<>(); sb.append("["); for (VolumeVO vol : nonDestroyedVols) { - volInstance = _vmInstanceDao.findById(vol.getInstanceId()); - if (volInstance != null) { - logMessageInfo.add(String.format("Volume [%s] (attached to VM [%s])", vol.getUuid(), volInstance.getUuid())); + if (vol.getInstanceId() != null) { + volInstance = _vmInstanceDao.findById(vol.getInstanceId()); + if (volInstance != null) { + logMessageInfo.add(String.format("Volume [%s] (attached to VM [%s])", vol.getUuid(), volInstance.getUuid())); + } else { + logMessageInfo.add(String.format("Volume [%s] (attached VM with ID [%d] doesn't exists)", vol.getUuid(), vol.getInstanceId())); + } } else { - logMessageInfo.add(String.format("Volume [%s]", vol.getUuid())); + logMessageInfo.add(String.format("Volume [%s] (not attached to any VM)", vol.getUuid())); } } sb.append(String.join(", ", logMessageInfo)); @@ -2640,7 +2644,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C for (String childDatastoreUUID : childDatastoreUUIDs) { StoragePoolVO dataStoreVO = _storagePoolDao.findPoolByUUID(childDatastoreUUID); - List allVolumes = volumeDao.findByPoolId(dataStoreVO.getId()); + List allVolumes = volumeDao.findNonDestroyedVolumesByPoolId(dataStoreVO.getId()); allVolumes.removeIf(volumeVO -> volumeVO.getInstanceId() == null); allVolumes.removeIf(volumeVO -> volumeVO.getState() != Volume.State.Ready); for (VolumeVO volume : allVolumes) { diff --git a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java index 612582640f4..667af5a876f 100644 --- a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java +++ b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java @@ -91,7 +91,7 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { boolean restart = !CollectionUtils.isEmpty(upPools); // 2. Get a list of all the ROOT volumes within this storage pool - List allVolumes = volumeDao.findByPoolId(pool.getId()); + List allVolumes = volumeDao.findNonDestroyedVolumesByPoolId(pool.getId()); // 3. Enqueue to the work queue enqueueMigrationsForVolumes(allVolumes, pool); // 4. Process the queue diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index b00358caaa9..3e045f5a905 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2261,7 +2261,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private List getVolumesByHost(HostVO host, StoragePool pool){ List vmsPerHost = _vmInstanceDao.listByHostId(host.getId()); return vmsPerHost.stream() - .flatMap(vm -> _volsDao.findByInstanceIdAndPoolId(vm.getId(),pool.getId()).stream().map(vol -> + .flatMap(vm -> _volsDao.findNonDestroyedVolumesByInstanceIdAndPoolId(vm.getId(),pool.getId()).stream().map(vol -> vol.getState() == Volume.State.Ready ? (vol.getFormat() == ImageFormat.OVA ? 
vol.getChainInfo() : vol.getPath()) : null).filter(Objects::nonNull)) .collect(Collectors.toList()); } diff --git a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java index 414d41145f7..5b7353bded6 100644 --- a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java +++ b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java @@ -198,8 +198,8 @@ public class ResourceManagerImplTest { rootDisks = Arrays.asList(rootDisk1, rootDisk2); dataDisks = Collections.singletonList(dataDisk); - when(volumeDao.findByPoolId(poolId)).thenReturn(rootDisks); - when(volumeDao.findByPoolId(poolId, Volume.Type.DATADISK)).thenReturn(dataDisks); + when(volumeDao.findNonDestroyedVolumesByPoolId(poolId)).thenReturn(rootDisks); + when(volumeDao.findNonDestroyedVolumesByPoolId(poolId, Volume.Type.DATADISK)).thenReturn(dataDisks); } @After @@ -564,22 +564,22 @@ public class ResourceManagerImplTest { @Test public void testDestroyLocalStoragePoolVolumesOnlyRootDisks() { - when(volumeDao.findByPoolId(poolId, Volume.Type.DATADISK)).thenReturn(null); + when(volumeDao.findNonDestroyedVolumesByPoolId(poolId, Volume.Type.DATADISK)).thenReturn(null); resourceManager.destroyLocalStoragePoolVolumes(poolId); verify(volumeDao, times(rootDisks.size())).updateAndRemoveVolume(any(VolumeVO.class)); } @Test public void testDestroyLocalStoragePoolVolumesOnlyDataDisks() { - when(volumeDao.findByPoolId(poolId)).thenReturn(null); + when(volumeDao.findNonDestroyedVolumesByPoolId(poolId)).thenReturn(null); resourceManager.destroyLocalStoragePoolVolumes(poolId); verify(volumeDao, times(dataDisks.size())).updateAndRemoveVolume(any(VolumeVO.class)); } @Test public void testDestroyLocalStoragePoolVolumesNoDisks() { - when(volumeDao.findByPoolId(poolId)).thenReturn(null); - when(volumeDao.findByPoolId(poolId, Volume.Type.DATADISK)).thenReturn(null); + when(volumeDao.findNonDestroyedVolumesByPoolId(poolId)).thenReturn(null); + when(volumeDao.findNonDestroyedVolumesByPoolId(poolId, Volume.Type.DATADISK)).thenReturn(null); resourceManager.destroyLocalStoragePoolVolumes(poolId); verify(volumeDao, never()).updateAndRemoveVolume(any(VolumeVO.class)); } diff --git a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java index 4a28e044d9c..5f02c89339a 100644 --- a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java +++ b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java @@ -531,7 +531,7 @@ public class StorageManagerImplTest { } @Test - public void getStoragePoolNonDestroyedVolumesLogTestNonDestroyedVolumesReturnLog() { + public void getStoragePoolNonDestroyedVolumesLogTestNonDestroyedVolumes_VMAttachedLogs() { Mockito.doReturn(1L).when(storagePoolVOMock).getId(); Mockito.doReturn(1L).when(volume1VOMock).getInstanceId(); Mockito.doReturn("786633d1-a942-4374-9d56-322dd4b0d202").when(volume1VOMock).getUuid(); @@ -539,7 +539,7 @@ public class StorageManagerImplTest { Mockito.doReturn("ffb46333-e983-4c21-b5f0-51c5877a3805").when(volume2VOMock).getUuid(); Mockito.doReturn("58760044-928f-4c4e-9fef-d0e48423595e").when(vmInstanceVOMock).getUuid(); - Mockito.when(_volumeDao.findByPoolId(storagePoolVOMock.getId(), null)).thenReturn(List.of(volume1VOMock, volume2VOMock)); + Mockito.when(_volumeDao.findNonDestroyedVolumesByPoolId(storagePoolVOMock.getId(), null)).thenReturn(List.of(volume1VOMock, volume2VOMock)); 
Mockito.doReturn(vmInstanceVOMock).when(vmInstanceDao).findById(Mockito.anyLong()); String log = storageManagerImpl.getStoragePoolNonDestroyedVolumesLog(storagePoolVOMock.getId()); @@ -548,6 +548,58 @@ public class StorageManagerImplTest { Assert.assertEquals(expected, log); } + @Test + public void getStoragePoolNonDestroyedVolumesLogTestNonDestroyedVolumes_VMLogForOneVolume() { + Mockito.doReturn(1L).when(storagePoolVOMock).getId(); + Mockito.doReturn(null).when(volume1VOMock).getInstanceId(); + Mockito.doReturn("786633d1-a942-4374-9d56-322dd4b0d202").when(volume1VOMock).getUuid(); + Mockito.doReturn(1L).when(volume2VOMock).getInstanceId(); + Mockito.doReturn("ffb46333-e983-4c21-b5f0-51c5877a3805").when(volume2VOMock).getUuid(); + Mockito.doReturn("58760044-928f-4c4e-9fef-d0e48423595e").when(vmInstanceVOMock).getUuid(); + + Mockito.when(_volumeDao.findNonDestroyedVolumesByPoolId(storagePoolVOMock.getId(), null)).thenReturn(List.of(volume1VOMock, volume2VOMock)); + Mockito.doReturn(vmInstanceVOMock).when(vmInstanceDao).findById(Mockito.anyLong()); + + String log = storageManagerImpl.getStoragePoolNonDestroyedVolumesLog(storagePoolVOMock.getId()); + String expected = String.format("[Volume [%s] (not attached to any VM), Volume [%s] (attached to VM [%s])]", volume1VOMock.getUuid(), volume2VOMock.getUuid(), vmInstanceVOMock.getUuid()); + + Assert.assertEquals(expected, log); + } + + @Test + public void getStoragePoolNonDestroyedVolumesLogTestNonDestroyedVolumes_NotAttachedLogs() { + Mockito.doReturn(1L).when(storagePoolVOMock).getId(); + Mockito.doReturn(null).when(volume1VOMock).getInstanceId(); + Mockito.doReturn("786633d1-a942-4374-9d56-322dd4b0d202").when(volume1VOMock).getUuid(); + Mockito.doReturn(null).when(volume2VOMock).getInstanceId(); + Mockito.doReturn("ffb46333-e983-4c21-b5f0-51c5877a3805").when(volume2VOMock).getUuid(); + + Mockito.when(_volumeDao.findNonDestroyedVolumesByPoolId(storagePoolVOMock.getId(), null)).thenReturn(List.of(volume1VOMock, volume2VOMock)); + + String log = storageManagerImpl.getStoragePoolNonDestroyedVolumesLog(storagePoolVOMock.getId()); + String expected = String.format("[Volume [%s] (not attached to any VM), Volume [%s] (not attached to any VM)]", volume1VOMock.getUuid(), volume2VOMock.getUuid()); + + Assert.assertEquals(expected, log); + } + + @Test + public void getStoragePoolNonDestroyedVolumesLogTestNonDestroyedVolumes_VMNotExistsLog() { + Mockito.doReturn(1L).when(storagePoolVOMock).getId(); + Mockito.doReturn(1L).when(volume1VOMock).getInstanceId(); + Mockito.doReturn("786633d1-a942-4374-9d56-322dd4b0d202").when(volume1VOMock).getUuid(); + Mockito.doReturn(1L).when(volume2VOMock).getInstanceId(); + Mockito.doReturn("ffb46333-e983-4c21-b5f0-51c5877a3805").when(volume2VOMock).getUuid(); + + Mockito.when(_volumeDao.findNonDestroyedVolumesByPoolId(storagePoolVOMock.getId(), null)).thenReturn(List.of(volume1VOMock, volume2VOMock)); + Mockito.doReturn(null).when(vmInstanceDao).findById(Mockito.anyLong()); + + String log = storageManagerImpl.getStoragePoolNonDestroyedVolumesLog(storagePoolVOMock.getId()); + String expected = String.format("[Volume [%s] (attached VM with ID [%d] doesn't exists), Volume [%s] (attached VM with ID [%d] doesn't exists)]", + volume1VOMock.getUuid(), volume1VOMock.getInstanceId(), volume2VOMock.getUuid(), volume2VOMock.getInstanceId()); + + Assert.assertEquals(expected, log); + } + private ChangeStoragePoolScopeCmd mockChangeStoragePooolScopeCmd(String newScope) { ChangeStoragePoolScopeCmd cmd = new ChangeStoragePoolScopeCmd(); 
ReflectionTestUtils.setField(cmd, "id", 1L); From 4adb7195701f2b28b86456f739ad59ec9f369abf Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 26 Jan 2026 04:18:12 -0500 Subject: [PATCH 019/126] Allow modification of user vm details if user.vm.readonly.details is empty (#10456) --- .../apache/cloudstack/query/QueryService.java | 2 +- .../framework/config/ConfigKey.java | 28 +++++++++++++++++-- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/query/QueryService.java b/api/src/main/java/org/apache/cloudstack/query/QueryService.java index 828f9d5e064..5181ebe2b76 100644 --- a/api/src/main/java/org/apache/cloudstack/query/QueryService.java +++ b/api/src/main/java/org/apache/cloudstack/query/QueryService.java @@ -118,7 +118,7 @@ public interface QueryService { ConfigKey UserVMReadOnlyDetails = new ConfigKey<>(String.class, "user.vm.readonly.details", "Advanced", "dataDiskController, rootDiskController", - "List of read-only VM settings/details as comma separated string", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null); + "List of read-only VM settings/details as comma separated string", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null, ""); ConfigKey SortKeyAscending = new ConfigKey<>("Advanced", Boolean.class, "sortkey.algorithm", "true", "Sort algorithm - ascending or descending - to use. For entities that use sort key(template, disk offering, service offering, " + diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java index 00cf56345c8..27b04ddf893 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/ConfigKey.java @@ -120,10 +120,18 @@ public class ConfigKey { static ConfigDepotImpl s_depot = null; - static public void init(ConfigDepotImpl depot) { + private String _defaultValueIfEmpty = null; + + public static void init(ConfigDepotImpl depot) { s_depot = depot; } + public ConfigKey(Class type, String name, String category, String defaultValue, String description, boolean isDynamic, Scope scope, T multiplier, + String displayText, String parent, Ternary group, Pair subGroup, Kind kind, String options, String defaultValueIfEmpty) { + this(type, name, category, defaultValue, description, isDynamic, scope, multiplier, displayText, parent, group, subGroup, kind, options); + this._defaultValueIfEmpty = defaultValueIfEmpty; + } + public ConfigKey(String category, Class type, String name, String defaultValue, String description, boolean isDynamic, Scope scope) { this(type, name, category, defaultValue, description, isDynamic, scope, null); } @@ -216,7 +224,19 @@ public class ConfigKey { public T value() { if (_value == null || isDynamic()) { String value = s_depot != null ? s_depot.getConfigStringValue(_name, Scope.Global, null) : null; - _value = valueOf((value == null) ? defaultValue() : value); + + String effective; + if (value != null) { + if (value.isEmpty() && _defaultValueIfEmpty != null) { + effective = _defaultValueIfEmpty; + } else { + effective = value; + } + } else { + effective = _defaultValueIfEmpty != null ? 
_defaultValueIfEmpty : defaultValue(); + } + + _value = valueOf(effective); } return _value; @@ -231,6 +251,10 @@ public class ConfigKey { if (value == null) { return value(); } + + if (value.isEmpty() && _defaultValueIfEmpty != null) { + return valueOf(_defaultValueIfEmpty); + } return valueOf(value); } From 0958dfc13864315e83ae906bf4f90328ccd1557c Mon Sep 17 00:00:00 2001 From: Artem Sidorenko Date: Mon, 26 Jan 2026 10:21:47 +0100 Subject: [PATCH 020/126] Fix: proper permissions for systemvm template registrations on hardened systems (#12098) Related to https://github.com/apache/cloudstack/issues/10029#issuecomment-2531599607 We have umask 0077, so cloud-install-sys-tmplt is creating by default paths like below ``` $ ls -l /mnt/secondary/template/tmpl/ total 16 drwx------. 3 root root 4096 Nov 19 13:58 1 drwxrwxrwx. 7 root root 4096 Oct 31 09:42 2 drwxrwxrwx. 3 root root 4096 Oct 30 15:59 4 drwxr-xr-x. 2 root root 4096 Oct 31 10:21 5 $ ls -l /mnt/secondary/template/tmpl/1/ total 4 drwx------. 2 root root 4096 Nov 19 13:59 3 $ ls -l /mnt/secondary/template/tmpl/1/3/ total 549848 -rw-------. 1 root root 563032576 Nov 19 13:59 d23a1e19-c563-4f69-85ca-8721cf02082c.qcow2 -rw-------. 1 root root 287 Nov 19 13:59 template.properties ``` This results to the permissions problems later on, when trying to access the image Signed-off-by: Artem Sidorenko --- scripts/storage/secondary/cloud-install-sys-tmplt | 1 + scripts/storage/secondary/setup-sysvm-tmplt | 1 + 2 files changed, 2 insertions(+) diff --git a/scripts/storage/secondary/cloud-install-sys-tmplt b/scripts/storage/secondary/cloud-install-sys-tmplt index ad976c502c6..fc09dc968ff 100755 --- a/scripts/storage/secondary/cloud-install-sys-tmplt +++ b/scripts/storage/secondary/cloud-install-sys-tmplt @@ -44,6 +44,7 @@ failed() { } #set -x +umask 0022 # ensure we have the proper permissions even on hardened deployments mflag= fflag= ext="vhd" diff --git a/scripts/storage/secondary/setup-sysvm-tmplt b/scripts/storage/secondary/setup-sysvm-tmplt index 06f0586fe34..63006cc4e4c 100755 --- a/scripts/storage/secondary/setup-sysvm-tmplt +++ b/scripts/storage/secondary/setup-sysvm-tmplt @@ -19,6 +19,7 @@ # Usage: e.g. failed $? "this is an error" set -x +umask 0022 # ensure we have the proper permissions even on hardened deployments failed() { local returnval=$1 From d010e9fcf29822a312618eee5d7a4c8f0eb69d2e Mon Sep 17 00:00:00 2001 From: Manoj Kumar Date: Mon, 26 Jan 2026 15:03:30 +0530 Subject: [PATCH 021/126] Notify user if template upgrade is not required (#12483) --- .../cloud/network/router/VirtualNetworkApplianceManagerImpl.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index e171b68399b..7d0a4f20838 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -3358,6 +3358,7 @@ Configurable, StateListener Date: Mon, 26 Jan 2026 16:23:47 +0530 Subject: [PATCH 022/126] snapshot: fix listSnapshots for volume which got delete and whose storage pool got deleted (#12433) This fixes the case when the storage pool is removed as well the KVM host and the subsequent volumes on the host. When that happened, listing snapshots (for recovery purposes) cause NPE as the pool_id was null, but last_pool_id for the related destroyed volume wasn't null. 
This adds a fallback logic. Signed-off-by: Rohit Yadav --- .../storage/snapshot/StorageSystemSnapshotStrategy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java index a19397d03e3..560bb4b2fc1 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java @@ -951,7 +951,7 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId); - long volumeStoragePoolId = volumeVO.getPoolId(); + long volumeStoragePoolId = (volumeVO.getPoolId() != null ? volumeVO.getPoolId() : volumeVO.getLastPoolId()); if (SnapshotOperation.REVERT.equals(op)) { boolean baseVolumeExists = volumeVO.getRemoved() == null; From 63bdc2b990314f5443961a24b7522397a65bc81f Mon Sep 17 00:00:00 2001 From: Manoj Kumar Date: Mon, 26 Jan 2026 16:25:55 +0530 Subject: [PATCH 023/126] Add log for null templateVO (#12406) --- .../cloudstack/storage/image/TemplateDataFactoryImpl.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index c6430bcf9f9..3e1504beb3a 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -296,6 +296,9 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { @Override public boolean isTemplateMarkedForDirectDownload(long templateId) { VMTemplateVO templateVO = imageDataDao.findById(templateId); + if (templateVO == null) { + throw new CloudRuntimeException(String.format("Template not found with ID: %s", templateId)); + } return templateVO.isDirectDownload(); } } From 097c3a018bae6faf6cdac2db09c56507b980fa4f Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Mon, 26 Jan 2026 11:56:14 +0100 Subject: [PATCH 024/126] ConfigDrive: use file absolute path instead of canonical path to create ISO (#11623) * ConfigDrive: use file absolute path instead of canonical path to create ISO * el8: add xorrisofs as option --- .../storage/configdrive/ConfigDriveBuilder.java | 4 ++-- .../storage/configdrive/ConfigDriveBuilderTest.java | 12 ++++++------ packaging/el8/cloud.spec | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java b/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java index 0b81a25b1cd..15febbe972c 100644 --- a/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java +++ b/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java @@ -231,9 +231,9 @@ public class ConfigDriveBuilder { throw new CloudRuntimeException("Cannot create ISO for config drive using any know tool. 
Known paths [/usr/bin/genisoimage, /usr/bin/mkisofs, /usr/local/bin/mkisofs]"); } if (!isoCreator.canExecute()) { - throw new CloudRuntimeException("Cannot create ISO for config drive using: " + isoCreator.getCanonicalPath()); + throw new CloudRuntimeException("Cannot create ISO for config drive using: " + isoCreator.getAbsolutePath()); } - return isoCreator.getCanonicalPath(); + return isoCreator.getAbsolutePath(); } /** diff --git a/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilderTest.java b/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilderTest.java index c04ff0a1601..03ceac84399 100644 --- a/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilderTest.java +++ b/engine/storage/configdrive/src/test/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilderTest.java @@ -435,7 +435,7 @@ public class ConfigDriveBuilderTest { Mockito.verify(genIsoFileMock, Mockito.times(2)).exists(); Mockito.verify(genIsoFileMock).canExecute(); - Mockito.verify(genIsoFileMock).getCanonicalPath(); + Mockito.verify(genIsoFileMock).getAbsolutePath(); } } @@ -475,11 +475,11 @@ public class ConfigDriveBuilderTest { Mockito.verify(genIsoFileMock, Mockito.times(1)).exists(); Mockito.verify(genIsoFileMock, Mockito.times(0)).canExecute(); - Mockito.verify(genIsoFileMock, Mockito.times(0)).getCanonicalPath(); + Mockito.verify(genIsoFileMock, Mockito.times(0)).getAbsolutePath(); Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(2)).exists(); Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(1)).canExecute(); - Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(1)).getCanonicalPath(); + Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(1)).getAbsolutePath(); } } @@ -509,15 +509,15 @@ public class ConfigDriveBuilderTest { Mockito.verify(genIsoFileMock, Mockito.times(1)).exists(); Mockito.verify(genIsoFileMock, Mockito.times(0)).canExecute(); - Mockito.verify(genIsoFileMock, Mockito.times(0)).getCanonicalPath(); + Mockito.verify(genIsoFileMock, Mockito.times(0)).getAbsolutePath(); Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(1)).exists(); Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(0)).canExecute(); - Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(0)).getCanonicalPath(); + Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(0)).getAbsolutePath(); Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).exists(); Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).canExecute(); - Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).getCanonicalPath(); + Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).getAbsolutePath(); } } diff --git a/packaging/el8/cloud.spec b/packaging/el8/cloud.spec index 3d485112266..507a6e64173 100644 --- a/packaging/el8/cloud.spec +++ b/packaging/el8/cloud.spec @@ -76,7 +76,7 @@ Requires: sudo Requires: /sbin/service Requires: /sbin/chkconfig Requires: /usr/bin/ssh-keygen -Requires: (genisoimage or mkisofs) +Requires: (genisoimage or mkisofs or xorrisofs) Requires: ipmitool Requires: %{name}-common = %{_ver} Requires: (iptables-services or iptables) From 36edd92e480d8b738335c6500b2e90c4d3f91fb9 Mon Sep 17 00:00:00 2001 From: Henrique Sato Date: Mon, 26 Jan 2026 07:58:42 -0300 Subject: [PATCH 025/126] Fix snapshot physical size after migration (#12166) --- .../cloudstack/storage/image/SecondaryStorageServiceImpl.java | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java index 641a2a40dcd..f739fecf9bf 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java @@ -280,7 +280,7 @@ public class SecondaryStorageServiceImpl implements SecondaryStorageService { private void updateDataObject(DataObject srcData, DataObject destData) { if (destData instanceof SnapshotInfo) { SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySourceSnapshot(srcData.getId(), DataStoreRole.Image); - SnapshotDataStoreVO destSnapshotStore = snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Image, srcData.getDataStore().getId(), srcData.getId()); + SnapshotDataStoreVO destSnapshotStore = snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Image, destData.getDataStore().getId(), destData.getId()); if (snapshotStore != null && destSnapshotStore != null) { destSnapshotStore.setPhysicalSize(snapshotStore.getPhysicalSize()); destSnapshotStore.setCreated(snapshotStore.getCreated()); From 44793da58f29e534562b757bbf905071099507c3 Mon Sep 17 00:00:00 2001 From: Edward-x <30854794+YLChen-007@users.noreply.github.com> Date: Mon, 26 Jan 2026 19:22:22 +0800 Subject: [PATCH 026/126] =?UTF-8?q?fix=20Sensitive=20Data=20Exposure=20Thr?= =?UTF-8?q?ough=20Exception=20Logging=20in=20OVM=20Hypervis=E2=80=A6=20(#1?= =?UTF-8?q?2032)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix Sensitive Data Exposure Through Exception Logging in OVM Hypervisor Configuration * extra ‘)’ in log. 
Co-authored-by: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> * remove non-descriptive part Co-authored-by: Suresh Kumar Anaparti --------- Co-authored-by: chenyoulong20g@ict.ac.cn Co-authored-by: dahn Co-authored-by: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> Co-authored-by: Suresh Kumar Anaparti --- .../src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java index 9d958a9894a..a65e4d778d3 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java @@ -362,7 +362,7 @@ public class OvmResourceBase implements ServerResource, HypervisorResource { sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); if (sshConnection == null) { - throw new CloudRuntimeException(String.format("Cannot connect to ovm host(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); + throw new CloudRuntimeException(String.format("Cannot connect to ovm host(IP=%1$s, username=%2$s)", _ip, _username)); } if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "sh /usr/bin/configureOvm.sh postSetup")) { From bbc23a74683052228d254ba0e4958f4c19254f90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bernardo=20De=20Marco=20Gon=C3=A7alves?= Date: Mon, 26 Jan 2026 09:14:40 -0300 Subject: [PATCH 027/126] fix install path for systemvm templates when introducing new sec storage (#11605) --- .../cloudstack/storage/image/TemplateServiceImpl.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index 1bb954da410..c18be7c7335 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -1318,9 +1318,10 @@ public class TemplateServiceImpl implements TemplateService { if (_vmTemplateStoreDao.isTemplateMarkedForDirectDownload(tmplt.getId())) { continue; } - tmpltStore = - new TemplateDataStoreVO(storeId, tmplt.getId(), new Date(), 100, Status.DOWNLOADED, null, null, null, - TemplateConstants.DEFAULT_SYSTEM_VM_TEMPLATE_PATH + tmplt.getId() + '/', tmplt.getUrl()); + String templateDirectoryPath = TemplateConstants.DEFAULT_TMPLT_ROOT_DIR + File.separator + TemplateConstants.DEFAULT_TMPLT_FIRST_LEVEL_DIR; + String installPath = templateDirectoryPath + tmplt.getAccountId() + File.separator + tmplt.getId() + File.separator; + tmpltStore = new TemplateDataStoreVO(storeId, tmplt.getId(), new Date(), 100, Status.DOWNLOADED, + null, null, null, installPath, tmplt.getUrl()); tmpltStore.setSize(0L); tmpltStore.setPhysicalSize(0); // no size information for // pre-seeded system vm templates From 7536516e41636c9ae77ddda89c4f9827bfe55ba5 Mon Sep 17 00:00:00 2001 From: Manoj Kumar Date: Mon, 26 Jan 2026 18:12:43 +0530 Subject: [PATCH 028/126] add missing label text for label.aclname (#12511) --- ui/public/locales/en.json | 1 + 1 file changed, 1 insertion(+) diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 64437a4d07c..8bb7dba9bf5 100644 --- 
a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -47,6 +47,7 @@ "label.acl.rules": "ACL rules", "label.acl.reason.description": "Enter the reason behind an ACL rule.", "label.aclid": "ACL", +"label.aclname": "ACL name", "label.acl.rule.name": "ACL rule name", "label.acquire.new.ip": "Acquire new IP", "label.acquire.new.secondary.ip": "Acquire new secondary IP", From d50899427a70185bd0933bf386ae52aa0eab396b Mon Sep 17 00:00:00 2001 From: Daan Hoogland Date: Mon, 26 Jan 2026 14:17:38 +0100 Subject: [PATCH 029/126] merge forward error --- .../src/main/java/com/cloud/resource/ResourceManagerImpl.java | 2 +- .../test/java/com/cloud/resource/ResourceManagerImplTest.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index ab3162e7a4b..110353c4b48 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -2366,7 +2366,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, List conflictingHostIds = new ArrayList<>(CollectionUtils.intersection(hostIdsToDisconnect, hostIdsUsingTheStoragePool)); if (CollectionUtils.isNotEmpty(conflictingHostIds)) { Map> hostVolumeMap = new HashMap<>(); - List volumesInPool = volumeDao.findByPoolId(poolId); + List volumesInPool = volumeDao.findNonDestroyedVolumesByPoolId(poolId); Map vmInstanceCache = new HashMap<>(); for (Long hostId : conflictingHostIds) { diff --git a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java index 6ed0774a423..7e60c111ab2 100644 --- a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java +++ b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java @@ -944,7 +944,7 @@ public class ResourceManagerImplTest { Mockito.when(volume2.getInstanceId()).thenReturn(101L); List volumesInPool = Arrays.asList(volume1, volume2); - Mockito.doReturn(volumesInPool).when(volumeDao).findByPoolId(poolId); + Mockito.doReturn(volumesInPool).when(volumeDao).findNonDestroyedVolumesByPoolId(poolId); VMInstanceVO vmInstance1 = Mockito.mock(VMInstanceVO.class); VMInstanceVO vmInstance2 = Mockito.mock(VMInstanceVO.class); From 88181ebe722c95bc628851c25b8b6fad06a14780 Mon Sep 17 00:00:00 2001 From: John Bampton Date: Mon, 26 Jan 2026 23:59:31 +1000 Subject: [PATCH 030/126] Standardize and auto add license headers to all cfg files with pre-commit (#12230) --- .pre-commit-config.yaml | 10 +++++++ setup/dev/s3.cfg | 29 +++++++++--------- systemvm/debian/etc/haproxy/haproxy.cfg | 17 +++++++++++ .../devcloud-kvm-advanced-fusion.cfg | 30 +++++++++---------- tools/devcloud-kvm/devcloud-kvm-advanced.cfg | 30 +++++++++---------- tools/devcloud-kvm/devcloud-kvm.cfg | 30 +++++++++---------- tools/devcloud4/advanced/marvin.cfg | 30 +++++++++---------- tools/devcloud4/basic/marvin.cfg | 30 +++++++++---------- 8 files changed, 112 insertions(+), 94 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 26adafcbf26..49829caf125 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -52,6 +52,16 @@ repos: args: ['644'] files: \.md$ stages: [manual] + - id: insert-license + name: add license for all cfg files + description: automatically adds a licence header to all cfg files that don't have a license header + files: \.cfg$ + args: + - 
--comment-style + - '|#|' + - --license-filepath + - .github/workflows/license-templates/LICENSE.txt + - --fuzzy-match-generates-todo - id: insert-license name: add license for all Markdown files files: \.md$ diff --git a/setup/dev/s3.cfg b/setup/dev/s3.cfg index de28e5b2698..ce414f584cf 100644 --- a/setup/dev/s3.cfg +++ b/setup/dev/s3.cfg @@ -1,20 +1,19 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. # TODO: Change ACCESS_KEY/ SECRET_KEY to your credentials on the object store diff --git a/systemvm/debian/etc/haproxy/haproxy.cfg b/systemvm/debian/etc/haproxy/haproxy.cfg index 21964f297c2..68a4cd7cd58 100644 --- a/systemvm/debian/etc/haproxy/haproxy.cfg +++ b/systemvm/debian/etc/haproxy/haproxy.cfg @@ -1,3 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + global log 127.0.0.1:3914 local0 info chroot /var/lib/haproxy diff --git a/tools/devcloud-kvm/devcloud-kvm-advanced-fusion.cfg b/tools/devcloud-kvm/devcloud-kvm-advanced-fusion.cfg index b1a3418e5d3..ce3ec91bbcf 100644 --- a/tools/devcloud-kvm/devcloud-kvm-advanced-fusion.cfg +++ b/tools/devcloud-kvm/devcloud-kvm-advanced-fusion.cfg @@ -1,21 +1,19 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. { "zones": [ diff --git a/tools/devcloud-kvm/devcloud-kvm-advanced.cfg b/tools/devcloud-kvm/devcloud-kvm-advanced.cfg index a3a41da874f..60ad8b58b9f 100644 --- a/tools/devcloud-kvm/devcloud-kvm-advanced.cfg +++ b/tools/devcloud-kvm/devcloud-kvm-advanced.cfg @@ -1,21 +1,19 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
{ "zones": [ diff --git a/tools/devcloud-kvm/devcloud-kvm.cfg b/tools/devcloud-kvm/devcloud-kvm.cfg index ffd23504ffe..5ac417a13dc 100644 --- a/tools/devcloud-kvm/devcloud-kvm.cfg +++ b/tools/devcloud-kvm/devcloud-kvm.cfg @@ -1,20 +1,20 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + # This is a stock devcloud config converted from the file # tools/devcloud/devcloud.cfg. diff --git a/tools/devcloud4/advanced/marvin.cfg b/tools/devcloud4/advanced/marvin.cfg index 222dc65d045..7b6e656e620 100644 --- a/tools/devcloud4/advanced/marvin.cfg +++ b/tools/devcloud4/advanced/marvin.cfg @@ -1,21 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
+# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. { "zones": [ diff --git a/tools/devcloud4/basic/marvin.cfg b/tools/devcloud4/basic/marvin.cfg index 1c8ee547b26..9b7d73c381b 100644 --- a/tools/devcloud4/basic/marvin.cfg +++ b/tools/devcloud4/basic/marvin.cfg @@ -1,21 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at # -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. +# http://www.apache.org/licenses/LICENSE-2.0 # +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
{ "zones": [ From 63c8b5fc5627fd0de6d05609625e826de1a6f677 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Mon, 26 Jan 2026 15:23:27 +0100 Subject: [PATCH 031/126] api/server: support deploy-as-is template as VNF template (#12499) --- .../user/vm/DeployVnfApplianceCmd.java | 2 +- .../storage/template/VnfTemplateManager.java | 4 +++- .../storage/template/VnfTemplateUtils.java | 18 ++++++++++++++++ .../cloud/template/TemplateManagerImpl.java | 9 ++++++++ .../java/com/cloud/vm/UserVmManagerImpl.java | 2 +- .../template/VnfTemplateManagerImpl.java | 21 ++++++++++++++++++- .../template/TemplateManagerImplTest.java | 8 +++++++ .../com/cloud/vm/UserVmManagerImplTest.java | 6 +++--- .../template/VnfTemplateManagerImplTest.java | 8 +++---- ui/public/locales/en.json | 2 +- ui/src/views/compute/DeployVnfAppliance.vue | 14 +++++++++++-- .../views/compute/wizard/VnfNicsSelection.vue | 5 +++++ 12 files changed, 85 insertions(+), 14 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVnfApplianceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVnfApplianceCmd.java index 4d50dd9c39b..92ddfd5b235 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVnfApplianceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVnfApplianceCmd.java @@ -43,7 +43,7 @@ import java.util.List; public class DeployVnfApplianceCmd extends DeployVMCmd implements UserCmd { @Parameter(name = ApiConstants.VNF_CONFIGURE_MANAGEMENT, type = CommandType.BOOLEAN, required = false, - description = "True by default, security group or network rules (source nat and firewall rules) will be configured for VNF management interfaces. False otherwise. " + + description = "False by default, security group or network rules (source nat and firewall rules) will be configured for VNF management interfaces. True otherwise. 
" + "Network rules are configured if management network is an isolated network or shared network with security groups.") private Boolean vnfConfigureManagement; diff --git a/api/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManager.java b/api/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManager.java index 6571346ad65..3df59811561 100644 --- a/api/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManager.java +++ b/api/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManager.java @@ -29,6 +29,7 @@ import org.apache.cloudstack.api.command.user.template.UpdateVnfTemplateCmd; import org.apache.cloudstack.api.command.user.vm.DeployVnfApplianceCmd; import org.apache.cloudstack.framework.config.ConfigKey; import java.util.List; +import java.util.Map; public interface VnfTemplateManager { @@ -42,11 +43,12 @@ public interface VnfTemplateManager { void updateVnfTemplate(long templateId, UpdateVnfTemplateCmd cmd); - void validateVnfApplianceNics(VirtualMachineTemplate template, List networkIds); + void validateVnfApplianceNics(VirtualMachineTemplate template, List networkIds, Map vmNetworkMap); SecurityGroup createSecurityGroupForVnfAppliance(DataCenter zone, VirtualMachineTemplate template, Account owner, DeployVnfApplianceCmd cmd); void createIsolatedNetworkRulesForVnfAppliance(DataCenter zone, VirtualMachineTemplate template, Account owner, UserVm vm, DeployVnfApplianceCmd cmd) throws InsufficientAddressCapacityException, ResourceAllocationException, ResourceUnavailableException; + } diff --git a/api/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateUtils.java b/api/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateUtils.java index e997a50cec0..16ff2abb564 100644 --- a/api/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateUtils.java +++ b/api/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateUtils.java @@ -16,6 +16,7 @@ // under the License. package org.apache.cloudstack.storage.template; +import com.cloud.agent.api.to.deployasis.OVFNetworkTO; import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.VNF; import com.cloud.storage.Storage; @@ -124,6 +125,9 @@ public class VnfTemplateUtils { public static void validateApiCommandParams(BaseCmd cmd, VirtualMachineTemplate template) { if (cmd instanceof RegisterVnfTemplateCmd) { RegisterVnfTemplateCmd registerCmd = (RegisterVnfTemplateCmd) cmd; + if (registerCmd.isDeployAsIs() && CollectionUtils.isNotEmpty(registerCmd.getVnfNics())) { + throw new InvalidParameterValueException("VNF nics cannot be specified when register a deploy-as-is Template. Please wait until Template settings are read from OVA."); + } validateApiCommandParams(registerCmd.getVnfDetails(), registerCmd.getVnfNics(), registerCmd.getTemplateType()); } else if (cmd instanceof UpdateVnfTemplateCmd) { UpdateVnfTemplateCmd updateCmd = (UpdateVnfTemplateCmd) cmd; @@ -149,4 +153,18 @@ public class VnfTemplateUtils { } } } + + public static void validateDeployAsIsTemplateVnfNics(List ovfNetworks, List vnfNics) { + if (CollectionUtils.isEmpty(vnfNics)) { + return; + } + if (CollectionUtils.isEmpty(ovfNetworks)) { + throw new InvalidParameterValueException("The list of networks read from OVA is empty. 
Please wait until the template is fully downloaded and processed."); + } + for (VNF.VnfNic vnfNic : vnfNics) { + if (vnfNic.getDeviceId() < ovfNetworks.size() && !vnfNic.isRequired()) { + throw new InvalidParameterValueException(String.format("The VNF nic [device ID: %s ] is required as it is defined in the OVA template.", vnfNic.getDeviceId())); + } + } + } } diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index ba8e5714180..2c7d2d593e3 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -122,6 +122,7 @@ import com.cloud.agent.api.to.DatadiskTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.agent.api.to.deployasis.OVFNetworkTO; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.UserVmJoinDao; import com.cloud.api.query.vo.UserVmJoinVO; @@ -131,6 +132,7 @@ import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.deploy.DeployDestination; +import com.cloud.deployasis.dao.TemplateDeployAsIsDetailsDao; import com.cloud.domain.Domain; import com.cloud.domain.dao.DomainDao; import com.cloud.event.ActionEvent; @@ -313,6 +315,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, protected SnapshotHelper snapshotHelper; @Inject VnfTemplateManager vnfTemplateManager; + @Inject + TemplateDeployAsIsDetailsDao templateDeployAsIsDetailsDao; @Inject private SecondaryStorageHeuristicDao secondaryStorageHeuristicDao; @@ -2172,6 +2176,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, templateType = validateTemplateType(cmd, isAdmin, template.isCrossZones()); if (cmd instanceof UpdateVnfTemplateCmd) { VnfTemplateUtils.validateApiCommandParams(cmd, template); + UpdateVnfTemplateCmd updateCmd = (UpdateVnfTemplateCmd) cmd; + if (template.isDeployAsIs() && CollectionUtils.isNotEmpty(updateCmd.getVnfNics())) { + List ovfNetworks = templateDeployAsIsDetailsDao.listNetworkRequirementsByTemplateId(template.getId()); + VnfTemplateUtils.validateDeployAsIsTemplateVnfNics(ovfNetworks, updateCmd.getVnfNics()); + } vnfTemplateManager.updateVnfTemplate(template.getId(), (UpdateVnfTemplateCmd) cmd); } templateTag = ((UpdateTemplateCmd)cmd).getTemplateTag(); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 3e045f5a905..815ac4f70fe 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -6127,7 +6127,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new InvalidParameterValueException("Unable to use template " + templateId); } if (TemplateType.VNF.equals(template.getTemplateType())) { - vnfTemplateManager.validateVnfApplianceNics(template, cmd.getNetworkIds()); + vnfTemplateManager.validateVnfApplianceNics(template, cmd.getNetworkIds(), cmd.getVmNetworkMap()); } else if (cmd instanceof DeployVnfApplianceCmd) { throw new InvalidParameterValueException("Can't deploy VNF appliance from a non-VNF template"); } diff --git a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java 
b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java index ef0f6f6b226..0ebff237a44 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java @@ -201,7 +201,14 @@ public class VnfTemplateManagerImpl extends ManagerBase implements VnfTemplateMa } @Override - public void validateVnfApplianceNics(VirtualMachineTemplate template, List networkIds) { + public void validateVnfApplianceNics(VirtualMachineTemplate template, List networkIds, Map vmNetworkMap) { + if (template.isDeployAsIs()) { + if (CollectionUtils.isNotEmpty(networkIds)) { + throw new InvalidParameterValueException("VNF nics mappings should be empty for deploy-as-is templates"); + } + validateVnfApplianceNetworksMap(template, vmNetworkMap); + return; + } if (CollectionUtils.isEmpty(networkIds)) { throw new InvalidParameterValueException("VNF nics list is empty"); } @@ -213,6 +220,18 @@ public class VnfTemplateManagerImpl extends ManagerBase implements VnfTemplateMa } } + private void validateVnfApplianceNetworksMap(VirtualMachineTemplate template, Map vmNetworkMap) { + if (MapUtils.isEmpty(vmNetworkMap)) { + throw new InvalidParameterValueException("VNF networks map is empty"); + } + List vnfNics = vnfTemplateNicDao.listByTemplateId(template.getId()); + for (VnfTemplateNicVO vnfNic : vnfNics) { + if (vnfNic.isRequired() && vmNetworkMap.size() <= vnfNic.getDeviceId()) { + throw new InvalidParameterValueException("VNF nic is required but not found: " + vnfNic); + } + } + } + protected Set getOpenPortsForVnfAppliance(VirtualMachineTemplate template) { Set ports = new HashSet<>(); VnfTemplateDetailVO accessMethodsDetail = vnfTemplateDetailsDao.findDetail(template.getId(), VNF.AccessDetail.ACCESS_METHODS.name().toLowerCase()); diff --git a/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java b/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java index 98b1c05dba8..9680fe5e1fd 100755 --- a/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java +++ b/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java @@ -23,6 +23,7 @@ import com.cloud.agent.AgentManager; import com.cloud.api.query.dao.UserVmJoinDao; import com.cloud.configuration.Resource; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.deployasis.dao.TemplateDeployAsIsDetailsDao; import com.cloud.domain.dao.DomainDao; import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.InvalidParameterValueException; @@ -204,6 +205,8 @@ public class TemplateManagerImplTest { AccountManager _accountMgr; @Inject VnfTemplateManager vnfTemplateManager; + @Inject + TemplateDeployAsIsDetailsDao templateDeployAsIsDetailsDao; @Inject HeuristicRuleHelper heuristicRuleHelperMock; @@ -956,6 +959,11 @@ public class TemplateManagerImplTest { return Mockito.mock(VnfTemplateManager.class); } + @Bean + public TemplateDeployAsIsDetailsDao templateDeployAsIsDetailsDao() { + return Mockito.mock(TemplateDeployAsIsDetailsDao.class); + } + @Bean public SnapshotHelper snapshotHelper() { return Mockito.mock(SnapshotHelper.class); diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index ac1ecaa456b..570c57cb68d 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -1079,7 +1079,7 @@ public class 
UserVmManagerImplTest { when(templateMock.isDeployAsIs()).thenReturn(false); when(templateMock.getFormat()).thenReturn(Storage.ImageFormat.QCOW2); when(templateMock.getUserDataId()).thenReturn(null); - Mockito.doNothing().when(vnfTemplateManager).validateVnfApplianceNics(any(), nullable(List.class)); + Mockito.doNothing().when(vnfTemplateManager).validateVnfApplianceNics(any(), nullable(List.class), nullable(Map.class)); ServiceOfferingJoinVO svcOfferingMock = Mockito.mock(ServiceOfferingJoinVO.class); when(serviceOfferingJoinDao.findById(anyLong())).thenReturn(svcOfferingMock); @@ -1091,7 +1091,7 @@ public class UserVmManagerImplTest { UserVm result = userVmManagerImpl.createVirtualMachine(deployVMCmd); assertEquals(userVmVoMock, result); - Mockito.verify(vnfTemplateManager).validateVnfApplianceNics(templateMock, null); + Mockito.verify(vnfTemplateManager).validateVnfApplianceNics(templateMock, null, null); Mockito.verify(userVmManagerImpl).createBasicSecurityGroupVirtualMachine(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), nullable(Boolean.class), any(), any(), any(), any(), any(), any(), any(), eq(true), any()); @@ -1335,7 +1335,7 @@ public class UserVmManagerImplTest { when(templateMock.isDeployAsIs()).thenReturn(false); when(templateMock.getFormat()).thenReturn(Storage.ImageFormat.QCOW2); when(templateMock.getUserDataId()).thenReturn(null); - Mockito.doNothing().when(vnfTemplateManager).validateVnfApplianceNics(any(), nullable(List.class)); + Mockito.doNothing().when(vnfTemplateManager).validateVnfApplianceNics(any(), nullable(List.class), nullable(Map.class)); ServiceOfferingJoinVO svcOfferingMock = Mockito.mock(ServiceOfferingJoinVO.class); when(serviceOfferingJoinDao.findById(anyLong())).thenReturn(svcOfferingMock); diff --git a/server/src/test/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImplTest.java b/server/src/test/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImplTest.java index c3fa0d62604..b9565ebb292 100644 --- a/server/src/test/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImplTest.java @@ -228,25 +228,25 @@ public class VnfTemplateManagerImplTest { @Test public void testValidateVnfApplianceNicsWithRequiredNics() { List networkIds = Arrays.asList(200L, 201L); - vnfTemplateManagerImpl.validateVnfApplianceNics(template, networkIds); + vnfTemplateManagerImpl.validateVnfApplianceNics(template, networkIds, null); } @Test public void testValidateVnfApplianceNicsWithAllNics() { List networkIds = Arrays.asList(200L, 201L, 202L); - vnfTemplateManagerImpl.validateVnfApplianceNics(template, networkIds); + vnfTemplateManagerImpl.validateVnfApplianceNics(template, networkIds, null); } @Test(expected = InvalidParameterValueException.class) public void testValidateVnfApplianceNicsWithEmptyList() { List networkIds = new ArrayList<>(); - vnfTemplateManagerImpl.validateVnfApplianceNics(template, networkIds); + vnfTemplateManagerImpl.validateVnfApplianceNics(template, networkIds, null); } @Test(expected = InvalidParameterValueException.class) public void testValidateVnfApplianceNicsWithMissingNetworkId() { List networkIds = Arrays.asList(200L); - vnfTemplateManagerImpl.validateVnfApplianceNics(template, networkIds); + vnfTemplateManagerImpl.validateVnfApplianceNics(template, networkIds, null); } @Test diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json 
index 8bb7dba9bf5..b2465fa325f 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -2529,7 +2529,7 @@ "label.vnf.cidr.list": "CIDR from which access to the VNF appliance's Management interface should be allowed from", "label.vnf.cidr.list.tooltip": "the CIDR list to forward traffic from to the VNF management interface. Multiple entries must be separated by a single comma character (,). The default value is 0.0.0.0/0.", "label.vnf.configure.management": "Configure network rules for VNF's management interfaces", -"label.vnf.configure.management.tooltip": "True by default, security group or network rules (source nat and firewall rules) will be configured for VNF management interfaces. False otherwise. Learn what rules are configured at http://docs.cloudstack.apache.org/en/latest/adminguide/networking/vnf_templates_appliances.html#deploying-vnf-appliances", +"label.vnf.configure.management.tooltip": "False by default, security group or network rules (source nat and firewall rules) will be configured for VNF management interfaces. True otherwise. Learn what rules are configured at http://docs.cloudstack.apache.org/en/latest/adminguide/networking/vnf_templates_appliances.html#deploying-vnf-appliances", "label.vnf.detail.add": "Add VNF detail", "label.vnf.detail.remove": "Remove VNF detail", "label.vnf.details": "VNF Details", diff --git a/ui/src/views/compute/DeployVnfAppliance.vue b/ui/src/views/compute/DeployVnfAppliance.vue index fec1139ab9b..9b09de5a186 100644 --- a/ui/src/views/compute/DeployVnfAppliance.vue +++ b/ui/src/views/compute/DeployVnfAppliance.vue @@ -372,6 +372,7 @@
@@ -1293,7 +1294,8 @@ export default { return tabList }, showVnfNicsSection () { - return this.networks && this.networks.length > 0 && this.vm.templateid && this.templateVnfNics && this.templateVnfNics.length > 0 + return ((this.networks && this.networks.length > 0) || (this.templateNics && this.templateNics.length > 0)) && + this.vm.templateid && this.templateVnfNics && this.templateVnfNics.length > 0 }, showVnfConfigureManagement () { const managementDeviceIds = [] @@ -1303,6 +1305,11 @@ export default { } } for (const deviceId of managementDeviceIds) { + if (this.templateNics && this.templateNics[deviceId] && + ((this.templateNics[deviceId].selectednetworktype === 'Isolated' && this.templateNics[deviceId].selectednetworkvpcid === undefined) || + (this.templateNics[deviceId].selectednetworktype === 'Shared' && this.templateNics[deviceId].selectednetworkwithsg))) { + return true + } if (this.vnfNicNetworks && this.vnfNicNetworks[deviceId] && ((this.vnfNicNetworks[deviceId].type === 'Isolated' && this.vnfNicNetworks[deviceId].vpcid === undefined) || (this.vnfNicNetworks[deviceId].type === 'Shared' && this.vnfNicNetworks[deviceId].service.filter(svc => svc.name === 'SecurityGroupProvider')))) { @@ -2005,7 +2012,7 @@ export default { // All checked networks should be used and only once. // Required NIC must be associated to a network // DeviceID must be consequent - if (this.templateVnfNics && this.templateVnfNics.length > 0) { + if (this.templateVnfNics && this.templateVnfNics.length > 0 && (!this.templateNics || this.templateNics.length === 0)) { let nextDeviceId = 0 const usedNetworkIds = [] const keys = Object.keys(this.vnfNicNetworks) @@ -2629,6 +2636,9 @@ export default { var network = this.options.networks[Math.min(i, this.options.networks.length - 1)] nic.selectednetworkid = network.id nic.selectednetworkname = network.name + nic.selectednetworktype = network.type + nic.selectednetworkvpcid = network.vpcid + nic.selectednetworkwithsg = network.service.filter(svc => svc.name === 'SecurityGroupProvider').length > 0 this.nicToNetworkSelection.push({ nic: nic.id, network: network.id }) } } diff --git a/ui/src/views/compute/wizard/VnfNicsSelection.vue b/ui/src/views/compute/wizard/VnfNicsSelection.vue index fdd5276b4f6..40bdc1c676a 100644 --- a/ui/src/views/compute/wizard/VnfNicsSelection.vue +++ b/ui/src/views/compute/wizard/VnfNicsSelection.vue @@ -50,6 +50,7 @@ @@ -140,6 +141,13 @@ export default { handleSearch (value) { this.filter = value this.fetchData() + }, + handleConfigRefresh (name, updatedRecord) { + if (!name || !updatedRecord) return + const index = this.items.findIndex(item => item.name === name) + if (index !== -1) { + this.items.splice(index, 1, updatedRecord) + } } } } diff --git a/ui/src/views/setting/ConfigurationHierarchy.vue b/ui/src/views/setting/ConfigurationHierarchy.vue index 80b464e657c..815a048bc25 100644 --- a/ui/src/views/setting/ConfigurationHierarchy.vue +++ b/ui/src/views/setting/ConfigurationHierarchy.vue @@ -34,7 +34,7 @@ {{ record.description }} @@ -83,6 +83,9 @@ export default { return 'light-row' } return 'dark-row' + }, + handleConfigRefresh (name, updatedRecord) { + this.$emit('refresh-config', name, updatedRecord) } } } diff --git a/ui/src/views/setting/ConfigurationTab.vue b/ui/src/views/setting/ConfigurationTab.vue index 75905cbd174..65b256c94c9 100644 --- a/ui/src/views/setting/ConfigurationTab.vue +++ b/ui/src/views/setting/ConfigurationTab.vue @@ -58,7 +58,8 @@ :count="count" :page="page" :pagesize="pagesize" - @change-page="changePage" /> + 
@change-page="changePage" + @refresh-config="handleConfigRefresh" /> + :config="config" + @refresh-config="handleConfigRefresh" /> @@ -322,6 +324,13 @@ export default { '#' + this.$route.path ) } + }, + handleConfigRefresh (name, updatedRecord) { + if (!name || !updatedRecord) return + const index = this.config.findIndex(item => item.name === name) + if (index !== -1) { + this.config.splice(index, 1, updatedRecord) + } } } } diff --git a/ui/src/views/setting/ConfigurationTable.vue b/ui/src/views/setting/ConfigurationTable.vue index da05b9342a0..7edc1b1aad6 100644 --- a/ui/src/views/setting/ConfigurationTable.vue +++ b/ui/src/views/setting/ConfigurationTable.vue @@ -32,7 +32,10 @@ {{record.displaytext }} {{ ' (' + record.name + ')' }}
{{ record.description }} @@ -113,6 +116,9 @@ export default { return 'config-light-row' } return 'config-dark-row' + }, + handleConfigRefresh (name, updatedRecord) { + this.$emit('refresh-config', name, updatedRecord) } } } diff --git a/ui/src/views/setting/ConfigurationValue.vue b/ui/src/views/setting/ConfigurationValue.vue index 662e5ef142e..531d4e0ea61 100644 --- a/ui/src/views/setting/ConfigurationValue.vue +++ b/ui/src/views/setting/ConfigurationValue.vue @@ -187,7 +187,7 @@ @onClick="$resetConfigurationValueConfirm(configrecord, resetConfigurationValue)" v-if="editableValueKey === null" icon="reload-outlined" - :disabled="(!('resetConfiguration' in $store.getters.apis) || configDisabled || valueLoading)" /> + :disabled="(!('resetConfiguration' in $store.getters.apis) || configDisabled || valueLoading || configrecord.value === configrecord.defaultvalue)" /> @@ -273,6 +273,7 @@ export default { this.editableValueKey = null }, updateConfigurationValue (configrecord) { + let configRecordEntry = this.configrecord this.valueLoading = true this.editableValueKey = null var newValue = this.editableValue @@ -294,7 +295,8 @@ export default { params[this.scopeKey] = this.resource?.id } postAPI('updateConfiguration', params).then(json => { - this.editableValue = this.getEditableValue(json.updateconfigurationresponse.configuration) + configRecordEntry = json.updateconfigurationresponse.configuration + this.editableValue = this.getEditableValue(configRecordEntry) this.actualValue = this.editableValue this.$emit('change-config', { value: newValue }) this.$store.dispatch('RefreshFeatures') @@ -318,10 +320,11 @@ export default { }) }).finally(() => { this.valueLoading = false - this.$emit('refresh') + this.$emit('refresh', configrecord.name, configRecordEntry) }) }, resetConfigurationValue (configrecord) { + let configRecordEntry = this.configrecord this.valueLoading = true this.editableValueKey = null const params = { @@ -332,7 +335,8 @@ export default { params[this.scopeKey] = this.resource?.id } postAPI('resetConfiguration', params).then(json => { - this.editableValue = this.getEditableValue(json.resetconfigurationresponse.configuration) + configRecordEntry = json.resetconfigurationresponse.configuration + this.editableValue = this.getEditableValue(configRecordEntry) this.actualValue = this.editableValue var newValue = this.editableValue if (configrecord.type === 'Range') { @@ -360,7 +364,7 @@ export default { }) }).finally(() => { this.valueLoading = false - this.$emit('refresh') + this.$emit('refresh', configrecord.name, configRecordEntry) }) }, getEditableValue (configrecord) { From dd0b863e22579caa77e399855df690d350c80d3e Mon Sep 17 00:00:00 2001 From: Edward-x <30854794+YLChen-007@users.noreply.github.com> Date: Wed, 28 Jan 2026 12:41:23 +0800 Subject: [PATCH 056/126] sensitive information leak to log (#12018) * sensitive information leak to log * Update agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java * Update core/src/main/java/com/cloud/storage/template/HttpTemplateDownloader.java * Update engine/schema/src/main/java/com/cloud/upgrade/DatabaseCreator.java * Update plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java * Update plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java * Update plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java * Update 
plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java * Update plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java * Update plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java * Update utils/src/main/java/com/cloud/utils/UriUtils.java Co-authored-by: dahn * Update plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java Co-authored-by: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> * Sync with 4.20 and fix conflict in BaremetalPingPxeResource * Apply suggestions from code review Co-authored-by: Suresh Kumar Anaparti --------- Co-authored-by: chenyoulong20g@ict.ac.cn Co-authored-by: dahn Co-authored-by: dahn Co-authored-by: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> Co-authored-by: Suresh Kumar Anaparti --- .../resource/consoleproxy/ConsoleProxyResource.java | 2 +- .../networkservice/BaremetalDnsmasqResource.java | 4 ++-- .../networkservice/BaremetalKickStartPxeResource.java | 6 +++--- .../networkservice/BaremetalPingPxeResource.java | 10 +++++----- utils/src/main/java/com/cloud/utils/UriUtils.java | 8 ++++++-- 5 files changed, 17 insertions(+), 13 deletions(-) diff --git a/agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java b/agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java index b0b1e487a26..83b11418f2c 100644 --- a/agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java +++ b/agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java @@ -331,7 +331,7 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe final Object resource = this; logger.info("Building class loader for com.cloud.consoleproxy.ConsoleProxy"); if (consoleProxyMain == null) { - logger.info("Running com.cloud.consoleproxy.ConsoleProxy with encryptor password={}", encryptorPassword); + logger.info("Running com.cloud.consoleproxy.ConsoleProxy"); consoleProxyMain = new Thread(new ManagedContextRunnable() { @Override protected void runInContext() { diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java index 51acfe93d39..8e7efedfca3 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java @@ -46,10 +46,10 @@ public class BaremetalDnsmasqResource extends BaremetalDhcpResourceBase { com.trilead.ssh2.Connection sshConnection = null; try { super.configure(name, params); - logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, _password)); + logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s", _ip, _username)); sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); if (sshConnection == null) { - throw new ConfigurationException(String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); + throw new ConfigurationException(String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s", _ip, _username)); } 
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "[ -f '/usr/sbin/dnsmasq' ]")) { diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java index 3775f4effc1..88c4dea96b3 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java @@ -130,8 +130,8 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); + logger.debug("SSH Failed to authenticate with user {} credentials", _username); + throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username)); } String script = String.format("python /usr/bin/baremetal_user_data.py '%s'", arg); @@ -167,7 +167,7 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); + throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username)); } String copyTo = String.format("%s/%s", _tftpDir, cmd.getTemplateUuid()); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java index 96b2dbfeb93..a54cd4a1a11 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java @@ -101,7 +101,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); + throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=******", _ip, _username)); } String cmd = String.format("[ -f /%1$s/pxelinux.0 ] && [ -f /%2$s/kernel ] && [ -f /%3$s/initrd.gz ] ", _tftpDir, _tftpDir, _tftpDir); @@ -150,8 +150,8 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); + logger.debug("SSH Failed to authenticate with 
user {} credentials", _username); + throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username)); } String script = @@ -179,7 +179,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); + throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username)); } String script = @@ -237,7 +237,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); + throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username)); } String script = String.format("python /usr/bin/baremetal_user_data.py '%s'", arg); diff --git a/utils/src/main/java/com/cloud/utils/UriUtils.java b/utils/src/main/java/com/cloud/utils/UriUtils.java index 961c121597f..4722e3c540a 100644 --- a/utils/src/main/java/com/cloud/utils/UriUtils.java +++ b/utils/src/main/java/com/cloud/utils/UriUtils.java @@ -500,8 +500,12 @@ public class UriUtils { if ((user != null) && (password != null)) { httpclient.getParams().setAuthenticationPreemptive(true); Credentials defaultcreds = new UsernamePasswordCredentials(user, password); - httpclient.getState().setCredentials(new AuthScope(hostAndPort.first(), hostAndPort.second(), AuthScope.ANY_REALM), defaultcreds); - LOGGER.info("Added username=" + user + ", password=" + password + "for host " + hostAndPort.first() + ":" + hostAndPort.second()); + httpclient.getState().setCredentials( + new AuthScope(hostAndPort.first(), hostAndPort.second(), AuthScope.ANY_REALM), defaultcreds); + LOGGER.info("Added username={} along with password for host {}:{}" + , user + , hostAndPort.first() + , hostAndPort.second()); } // Execute the method. 
GetMethod method = new GetMethod(url); From 66665b883c1392ebb0af09544681971dcc00e046 Mon Sep 17 00:00:00 2001 From: Tonitzpp <134986282+Tonitzpp@users.noreply.github.com> Date: Wed, 28 Jan 2026 01:42:57 -0300 Subject: [PATCH 057/126] Changed error message when snapshot is not on secondary when trying to perform download (#12462) Co-authored-by: toni.zamparetti --- .../java/com/cloud/storage/snapshot/SnapshotManagerImpl.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index ff9989acac3..1db48658446 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -578,8 +578,9 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement } if (ObjectUtils.anyNull(chosenStore, snapshotDataStoreReference)) { - logger.error("Snapshot [{}] not found in any secondary storage.", snapshot); - throw new InvalidParameterValueException("Snapshot not found."); + String errorMessage = String.format("Snapshot [%s] not found in any secondary storage. The snapshot may be on primary storage, where it cannot be downloaded.", snapshot.getUuid()); + logger.error(errorMessage); + throw new InvalidParameterValueException(errorMessage); } snapshotSrv.syncVolumeSnapshotsToRegionStore(snapshot.getVolumeId(), chosenStore); From 062b98a51eca7da6b6083cbb31c2fb0608448386 Mon Sep 17 00:00:00 2001 From: cheng102e <38267524+cheng102e@users.noreply.github.com> Date: Wed, 28 Jan 2026 12:45:11 +0800 Subject: [PATCH 058/126] fix: clean magic value, and update if-else to switch (#8848) * fix: clean magic value, and update if-else to switch * fix: return the (String args[]) * review --------- Co-authored-by: jiejc1 Co-authored-by: Suresh Kumar Anaparti --- .../src/main/java/common/Client.java | 34 +++++++++++-------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/services/console-proxy/rdpconsole/src/main/java/common/Client.java b/services/console-proxy/rdpconsole/src/main/java/common/Client.java index 742f5c9f0cd..972d5d753e8 100644 --- a/services/console-proxy/rdpconsole/src/main/java/common/Client.java +++ b/services/console-proxy/rdpconsole/src/main/java/common/Client.java @@ -210,7 +210,6 @@ public class Client { public void runClient(String[] args) { try { - Protocol protocol = parseOptions(args); if (protocol == Protocol.NONE) return; @@ -299,21 +298,28 @@ public class Client { private Protocol parseOptions(String[] args) { String protocolName = (args.length > 0) ? 
args[0] : ""; - Protocol protocol = Protocol.NONE; + Protocol protocol; Option[] options; - if (protocolName.equals("vnc")) { - protocol = Protocol.VNC; - options = join(commonOptions, vncOptions); - } else if (protocolName.equals("rdp")) { - protocol = Protocol.RDP; - options = join(commonOptions, rdpOptions); - } else if (protocolName.equals("hyperv")) { - protocol = Protocol.HYPERV; - options = join(commonOptions, hyperVOptions); - } else { - help(); - return Protocol.NONE; + try { + protocol = Protocol.valueOf(protocolName); + } catch (IllegalArgumentException e) { + protocol = Protocol.NONE; + } + + switch (protocol) { + case VNC: + options = join(commonOptions, vncOptions); + break; + case RDP: + options = join(commonOptions, rdpOptions); + break; + case HYPERV: + options = join(commonOptions, hyperVOptions); + break; + default: + help(); + return Protocol.NONE; } // Parse all options for given protocol From 21d5c10850111cd2e76d783b95b090da623ae024 Mon Sep 17 00:00:00 2001 From: Manoj Kumar Date: Wed, 28 Jan 2026 10:55:59 +0530 Subject: [PATCH 059/126] Apply reordered ACL list to VR router (#12525) This PR address #9398 --- .../network/element/VpcVirtualRouterElement.java | 10 +++++++++- .../cloud/network/vpc/NetworkACLServiceImpl.java | 15 ++++++++++++--- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java index 3d613fca18e..f393ef8a129 100644 --- a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java @@ -550,7 +550,15 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc @Override public boolean reorderAclRules(Vpc vpc, List networks, List networkACLItems) { - return true; + boolean result = true; + try { + for (Network network : networks) { + result = result && applyNetworkACLs(network, networkACLItems); + } + } catch (ResourceUnavailableException ex) { + result = false; + } + return result; } @Override diff --git a/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java index ecb164018ac..7460ae87d44 100644 --- a/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -109,6 +109,8 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ private NsxProviderDao nsxProviderDao; @Inject private NetrisProviderDao netrisProviderDao; + @Inject + private VpcManager vpcManager; private String supportedProtocolsForAclRules = "tcp,udp,icmp,all"; @@ -1037,13 +1039,20 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ if (Objects.isNull(vpc)) { return networkACLItem; } + List networks = _networkDao.listByAclId(lockedAcl.getId()); + if (networks.isEmpty()) { + return networkACLItem; + } + final DataCenter dc = _entityMgr.findById(DataCenter.class, vpc.getZoneId()); final NsxProviderVO nsxProvider = nsxProviderDao.findByZoneId(dc.getId()); final NetrisProviderVO netrisProvider = netrisProviderDao.findByZoneId(dc.getId()); - List networks = _networkDao.listByAclId(lockedAcl.getId()); - if (ObjectUtils.anyNotNull(nsxProvider, netrisProvider) && !networks.isEmpty()) { + boolean isVpcNetworkACLProvider = vpcManager.isProviderSupportServiceInVpc(vpc.getId(), 
Network.Service.NetworkACL, Network.Provider.VPCVirtualRouter); + + if (ObjectUtils.anyNotNull(nsxProvider, netrisProvider) || isVpcNetworkACLProvider) { allAclRules = getAllAclRulesSortedByNumber(lockedAcl.getId()); - Network.Provider networkProvider = nsxProvider != null ? Network.Provider.Nsx : Network.Provider.Netris; + Network.Provider networkProvider = isVpcNetworkACLProvider ? Network.Provider.VPCVirtualRouter + : (nsxProvider != null ? Network.Provider.Nsx : Network.Provider.Netris); _networkAclMgr.reorderAclRules(vpc, networks, allAclRules, networkProvider); } return networkACLItem; From 572aa1956493d58a0efa283d2f75d876d92a5ad9 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 28 Jan 2026 11:01:53 +0530 Subject: [PATCH 060/126] ui: show usage server restart message on usage config change (#11969) Fixes #10853 --------- Signed-off-by: Abhishek Kumar --- ui/public/locales/en.json | 1 + ui/src/components/view/ListView.vue | 10 +--------- ui/src/utils/plugins.js | 11 +++++++++++ ui/src/views/setting/ConfigurationValue.vue | 20 ++------------------ 4 files changed, 15 insertions(+), 27 deletions(-) diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 275b8dbb0fe..74715496434 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -3735,6 +3735,7 @@ "message.resource.not.found": "Resource not found.", "message.restart.mgmt.server": "Please restart your management server(s) for your new settings to take effect.", "message.restart.network": "All services provided by this Network will be interrupted. Please confirm that you want to restart this Network.", +"message.restart.usage.server": "Please restart your usage server(s) for your new settings to take effect.", "message.restart.vm.to.update.settings": "Update in fields other than name and display name will require the Instance to be restarted.", "message.restart.vpc": "Please confirm that you want to restart the VPC.", "message.restart.vpc.remark": "Please confirm that you want to restart the VPC

Remark: making a non-redundant VPC redundant will force a clean up. The Networks will not be available for a couple of minutes.

", diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index 168e355cbc8..79ec5a18207 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -1234,15 +1234,7 @@ export default { this.editableValueKey = null this.$store.dispatch('RefreshFeatures') this.$messageConfigSuccess(`${this.$t('message.setting.updated')} ${record.name}`, record) - if (json.updateconfigurationresponse && - json.updateconfigurationresponse.configuration && - !json.updateconfigurationresponse.configuration.isdynamic && - ['Admin'].includes(this.$store.getters.userInfo.roletype)) { - this.$notification.warning({ - message: this.$t('label.status'), - description: this.$t('message.restart.mgmt.server') - }) - } + this.$notifyConfigurationValueChange(json?.updateconfigurationresponse?.configuration || null) }).catch(error => { console.error(error) this.$message.error(this.$t('message.error.save.setting')) diff --git a/ui/src/utils/plugins.js b/ui/src/utils/plugins.js index 306eb9d1f59..729cef84d02 100644 --- a/ui/src/utils/plugins.js +++ b/ui/src/utils/plugins.js @@ -550,6 +550,17 @@ export const dialogUtilPlugin = { onOk: () => callback(configRecord) }) } + + app.config.globalProperties.$notifyConfigurationValueChange = function (configRecord) { + if (!configRecord || configRecord.isdynamic || store.getters.userInfo?.roletype !== 'Admin') { + return + } + const server = configRecord.group === 'Usage Server' ? 'usage' : 'mgmt' + this.$notification.warning({ + message: this.$t('label.status'), + description: this.$t('message.restart.' + server + '.server') + }) + } } } diff --git a/ui/src/views/setting/ConfigurationValue.vue b/ui/src/views/setting/ConfigurationValue.vue index 662e5ef142e..31c0798a717 100644 --- a/ui/src/views/setting/ConfigurationValue.vue +++ b/ui/src/views/setting/ConfigurationValue.vue @@ -299,15 +299,7 @@ export default { this.$emit('change-config', { value: newValue }) this.$store.dispatch('RefreshFeatures') this.$messageConfigSuccess(`${this.$t('message.setting.updated')} ${configrecord.name}`, configrecord) - if (json.updateconfigurationresponse && - json.updateconfigurationresponse.configuration && - !json.updateconfigurationresponse.configuration.isdynamic && - ['Admin'].includes(this.$store.getters.userInfo.roletype)) { - this.$notification.warning({ - message: this.$t('label.status'), - description: this.$t('message.restart.mgmt.server') - }) - } + this.$notifyConfigurationValueChange(json?.updateconfigurationresponse?.configuration || null) }).catch(error => { this.editableValue = this.actualValue console.error(error) @@ -341,15 +333,7 @@ export default { this.$emit('change-config', { value: newValue }) this.$store.dispatch('RefreshFeatures') this.$messageConfigSuccess(`${this.$t('label.setting')} ${configrecord.name} ${this.$t('label.reset.config.value')}`, configrecord) - if (json.resetconfigurationresponse && - json.resetconfigurationresponse.configuration && - !json.resetconfigurationresponse.configuration.isdynamic && - ['Admin'].includes(this.$store.getters.userInfo.roletype)) { - this.$notification.warning({ - message: this.$t('label.status'), - description: this.$t('message.restart.mgmt.server') - }) - } + this.$notifyConfigurationValueChange(json?.resetconfigurationresponse?.configuration || null) }).catch(error => { this.editableValue = this.actualValue console.error(error) From 2bfc9cb8eb621cead6b6c8067a878126f1b0c7b5 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Wed, 28 Jan 2026 06:47:14 +0100 Subject: [PATCH 
061/126] CKS: skip default egress policy check for vpc network offerings (#11998) This PR fixes #11995 Steps to reproduce the issue - create a vpc - create a vpc tier with default offering `DefaultIsolatedNetworkOfferingForVpcNetworks` - register CKS ISO - create CKS on the vpc tier expected: succeed actual: failed with error `Kubernetes service has not been configured properly to provision Kubernetes clusters` --- .../cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index cf4bbce098a..d19470f8bab 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -477,7 +477,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne logger.warn("Network offering: {} does not have necessary services to provision Kubernetes cluster", networkOffering); return false; } - if (!networkOffering.isEgressDefaultPolicy()) { + if (!networkOffering.isForVpc() && !networkOffering.isEgressDefaultPolicy()) { logger.warn("Network offering: {} has egress default policy turned off should be on to provision Kubernetes cluster", networkOffering); return false; } From 70d4c9d1baa5f6e7696b6311e7eeaaa2c4f0de3b Mon Sep 17 00:00:00 2001 From: Fabricio Duarte Date: Wed, 28 Jan 2026 02:48:31 -0300 Subject: [PATCH 062/126] Consider secondary storage selectors during cold volume migration (#10957) The secondary storage selectors allow operators to specify, for instance, that volumes should go to a specific secondary storage A. Thus, when uploading a volume, it will always be downloaded to secondary storage A. The cold volume migration moves volumes to a secondary storage before moving them to the destination primary storage. This process does not consider the secondary storage selectors. However, some companies want to dedicate specific secondary storages for cold migration. To address this, this PR makes the cold volume migration process consider the secondary storage selectors. 
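For context, a secondary storage selector is a per-zone heuristic rule written in
JavaScript and evaluated against preset variables (for the VOLUME type, volume.name,
volume.format, volume.size and account are populated, as set up in HeuristicRuleHelper).
A rough, hypothetical sketch of such a rule, assuming the engine injects the preset
variables and takes the last evaluated expression as the UUID of the chosen image
store (the UUIDs below are placeholders, not real stores):

    // Route large volumes (> 500 GiB) to a dedicated staging store,
    // everything else to the default secondary storage.
    volume.size > 500 * 1024 * 1024 * 1024
        ? 'uuid-of-dedicated-staging-store'
        : 'uuid-of-default-secondary-storage';

With this change, the same rule is consulted when picking the staging secondary
storage for a cold volume migration, instead of only on volume upload.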
--- .../motion/AncientDataMotionStrategy.java | 13 ++++++++++++- .../storage/heuristics/HeuristicRuleHelper.java | 16 ++++++++-------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index b59ee2c6166..4cb09fb81f7 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -45,10 +45,12 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.secstorage.heuristics.HeuristicType; import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; +import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.logging.log4j.Logger; @@ -104,6 +106,9 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { @Inject SnapshotDao snapshotDao; + @Inject + HeuristicRuleHelper heuristicRuleHelper; + @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { return StrategyPriority.DEFAULT; @@ -374,7 +379,13 @@ public class AncientDataMotionStrategy implements DataMotionStrategy { } // need to find a nfs or cifs image store, assuming that can't copy volume // directly to s3 - ImageStoreEntity imageStore = (ImageStoreEntity)dataStoreMgr.getImageStoreWithFreeCapacity(destScope.getScopeId()); + Long zoneId = destScope.getScopeId(); + ImageStoreEntity imageStore = (ImageStoreEntity) heuristicRuleHelper.getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.VOLUME, destData); + if (imageStore == null) { + logger.debug("Secondary storage selector did not direct volume migration to a specific secondary storage; using secondary storage with the most free capacity."); + imageStore = (ImageStoreEntity) dataStoreMgr.getImageStoreWithFreeCapacity(zoneId); + } + if (imageStore == null || !imageStore.getProtocol().equalsIgnoreCase("nfs") && !imageStore.getProtocol().equalsIgnoreCase("cifs")) { String errMsg = "can't find a nfs (or cifs) image store to satisfy the need for a staging store"; Answer answer = new Answer(null, false, errMsg); diff --git a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java index 21a34de0d23..2e0780e7fe8 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java @@ -117,8 +117,8 @@ public class HeuristicRuleHelper { accountId = ((SnapshotInfo) obj).getAccountId(); break; case VOLUME: - presetVariables.setVolume(setVolumePresetVariable((VolumeVO) obj)); - accountId = ((VolumeVO) obj).getAccountId(); + 
presetVariables.setVolume(setVolumePresetVariable((com.cloud.storage.Volume) obj)); + accountId = ((com.cloud.storage.Volume) obj).getAccountId(); break; } presetVariables.setAccount(setAccountPresetVariable(accountId)); @@ -191,14 +191,14 @@ public class HeuristicRuleHelper { return template; } - protected Volume setVolumePresetVariable(VolumeVO volumeVO) { - Volume volume = new Volume(); + protected Volume setVolumePresetVariable(com.cloud.storage.Volume volumeVO) { + Volume volumePresetVariable = new Volume(); - volume.setName(volumeVO.getName()); - volume.setFormat(volumeVO.getFormat()); - volume.setSize(volumeVO.getSize()); + volumePresetVariable.setName(volumeVO.getName()); + volumePresetVariable.setFormat(volumeVO.getFormat()); + volumePresetVariable.setSize(volumeVO.getSize()); - return volume; + return volumePresetVariable; } protected Snapshot setSnapshotPresetVariable(SnapshotInfo snapshotInfo) { From 4761935145e100622df083dedffec612482b16d2 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Wed, 28 Jan 2026 06:59:31 +0100 Subject: [PATCH 063/126] server: add options for kvm.guest.os.machine.type (#12414) --- server/src/main/java/com/cloud/api/query/QueryManagerImpl.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 3b232948395..d42dbaec6de 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -5398,6 +5398,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q options.put(VmDetailConstants.VIRTUAL_TPM_VERSION, Arrays.asList("1.2", "2.0")); options.put(VmDetailConstants.GUEST_CPU_MODE, Arrays.asList("custom", "host-model", "host-passthrough")); options.put(VmDetailConstants.GUEST_CPU_MODEL, Collections.emptyList()); + options.put(VmDetailConstants.KVM_GUEST_OS_MACHINE_TYPE, Collections.emptyList()); options.put(VmDetailConstants.KVM_SKIP_FORCE_DISK_CONTROLLER, Arrays.asList("true", "false")); } From 0dcbe57a47875d45ee7effa92451a331c5bccc10 Mon Sep 17 00:00:00 2001 From: Edward-x <30854794+YLChen-007@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:56:44 +0800 Subject: [PATCH 064/126] Fix that Sensitive information logged in SshHelper.sshExecute method (#12026) * Sensitive information logged in SshHelper.sshExecute method * Fix that Sensitive information logged in SshHelper.sshExecute method2 * Fix sensitive information handling in SshHelper and its tests --------- Co-authored-by: chenyoulong20g@ict.ac.cn --- .../java/com/cloud/utils/ssh/SshHelper.java | 73 ++++++++++++++++++- .../com/cloud/utils/ssh/SshHelperTest.java | 60 +++++++++++++++ 2 files changed, 129 insertions(+), 4 deletions(-) diff --git a/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java b/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java index 87221ab5ac8..caf2b28c52f 100644 --- a/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java +++ b/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java @@ -23,6 +23,8 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; @@ -40,6 +42,23 @@ public class SshHelper { private static final int DEFAULT_CONNECT_TIMEOUT = 180000; private static final int DEFAULT_KEX_TIMEOUT = 60000; private static final 
int DEFAULT_WAIT_RESULT_TIMEOUT = 120000; + private static final String MASKED_VALUE = "*****"; + + private static final Pattern[] SENSITIVE_COMMAND_PATTERNS = new Pattern[] { + Pattern.compile("(?i)(\\s+-p\\s+['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(\\s+-p\\s+)([^\\s]+)"), + Pattern.compile("(?i)(\\s+-p=['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(\\s+-p=)([^\\s]+)"), + Pattern.compile("(?i)(--password=['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(--password=)([^\\s]+)"), + Pattern.compile("(?i)(--password\\s+['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(--password\\s+)([^\\s]+)"), + Pattern.compile("(?i)(\\s+-u\\s+['\"][^,'\":]+[,:])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(\\s+-u\\s+[^\\s,:]+[,:])([^\\s]+)"), + Pattern.compile("(?i)(\\s+-s\\s+['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(\\s+-s\\s+)([^\\s]+)"), + + }; protected static Logger LOGGER = LogManager.getLogger(SshHelper.class); @@ -145,7 +164,7 @@ public class SshHelper { } public static void scpTo(String host, int port, String user, File pemKeyFile, String password, String remoteTargetDirectory, String[] localFiles, String fileMode, - int connectTimeoutInMs, int kexTimeoutInMs) throws Exception { + int connectTimeoutInMs, int kexTimeoutInMs) throws Exception { com.trilead.ssh2.Connection conn = null; com.trilead.ssh2.SCPClient scpClient = null; @@ -291,13 +310,16 @@ public class SshHelper { } if (sess.getExitStatus() == null) { - //Exit status is NOT available. Returning failure result. - LOGGER.error(String.format("SSH execution of command %s has no exit status set. Result output: %s", command, result)); + // Exit status is NOT available. Returning failure result. + LOGGER.error(String.format("SSH execution of command %s has no exit status set. Result output: %s", + sanitizeForLogging(command), sanitizeForLogging(result))); return new Pair(false, result); } if (sess.getExitStatus() != null && sess.getExitStatus().intValue() != 0) { - LOGGER.error(String.format("SSH execution of command %s has an error status code in return. Result output: %s", command, result)); + LOGGER.error(String.format( + "SSH execution of command %s has an error status code in return. 
Result output: %s", + sanitizeForLogging(command), sanitizeForLogging(result))); return new Pair(false, result); } return new Pair(true, result); @@ -366,4 +388,47 @@ public class SshHelper { throw new SshException(msg); } } + + private static String sanitizeForLogging(String value) { + if (value == null) { + return null; + } + String masked = maskSensitiveValue(value); + String cleaned = com.cloud.utils.StringUtils.cleanString(masked); + if (StringUtils.isBlank(cleaned)) { + return masked; + } + return cleaned; + } + + private static String maskSensitiveValue(String value) { + String masked = value; + for (Pattern pattern : SENSITIVE_COMMAND_PATTERNS) { + masked = replaceWithMask(masked, pattern); + } + return masked; + } + + private static String replaceWithMask(String value, Pattern pattern) { + Matcher matcher = pattern.matcher(value); + if (!matcher.find()) { + return value; + } + + StringBuffer buffer = new StringBuffer(); + do { + StringBuilder replacement = new StringBuilder(); + replacement.append(matcher.group(1)); + if (matcher.groupCount() >= 3) { + replacement.append(MASKED_VALUE); + replacement.append(matcher.group(matcher.groupCount())); + } else { + replacement.append(MASKED_VALUE); + } + matcher.appendReplacement(buffer, Matcher.quoteReplacement(replacement.toString())); + } while (matcher.find()); + + matcher.appendTail(buffer); + return buffer.toString(); + } } diff --git a/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java b/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java index 61d746bc12d..8a14f60527b 100644 --- a/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java +++ b/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java @@ -21,6 +21,7 @@ package com.cloud.utils.ssh; import java.io.IOException; import java.io.InputStream; +import java.lang.reflect.Method; import org.junit.Assert; import org.junit.Test; @@ -140,4 +141,63 @@ public class SshHelperTest { Mockito.verify(conn).openSession(); } + + @Test + public void sanitizeForLoggingMasksShortPasswordFlag() throws Exception { + String command = "/opt/cloud/bin/script -v 10.0.0.1 -p superSecret"; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain flag", sanitized.contains("-p *****")); + Assert.assertFalse("Sanitized command should not contain original password", sanitized.contains("superSecret")); + } + + @Test + public void sanitizeForLoggingMasksQuotedPasswordFlag() throws Exception { + String command = "/opt/cloud/bin/script -v 10.0.0.1 -p \"super Secret\""; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain quoted flag", sanitized.contains("-p *****")); + Assert.assertFalse("Sanitized command should not contain original password", + sanitized.contains("super Secret")); + } + + @Test + public void sanitizeForLoggingMasksLongPasswordAssignments() throws Exception { + String command = "tool --password=superSecret"; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain assignment", sanitized.contains("--password=*****")); + Assert.assertFalse("Sanitized command should not contain original password", sanitized.contains("superSecret")); + } + + @Test + public void sanitizeForLoggingMasksUsernamePasswordPairs() throws Exception { + String command = "/opt/cloud/bin/vpn_l2tp.sh -u alice,topSecret"; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain username and 
mask password", + sanitized.contains("-u alice,*****")); + Assert.assertFalse("Sanitized command should not contain original password", sanitized.contains("topSecret")); + } + + @Test + public void sanitizeForLoggingMasksUsernamePasswordPairsWithColon() throws Exception { + String command = "curl -u alice:topSecret https://example.com"; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain username and mask password", + sanitized.contains("-u alice:*****")); + Assert.assertFalse("Sanitized command should not contain original password", sanitized.contains("topSecret")); + } + + @Test + public void sanitizeForLoggingHandlesNullValues() throws Exception { + Assert.assertNull(invokeSanitizeForLogging(null)); + } + + private String invokeSanitizeForLogging(String value) throws Exception { + Method method = SshHelper.class.getDeclaredMethod("sanitizeForLogging", String.class); + method.setAccessible(true); + return (String) method.invoke(null, value); + } } From 1b2ae13df74ef3f22f8e5b40b4cf45a10863205f Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 28 Jan 2026 12:40:34 +0530 Subject: [PATCH 065/126] ui: add cache for oslogo request using osId (#11422) When OsLogo component is used in the items of a list having same OS type it was causing listOsTypes API call multiple time. This change allows caching request and response value for 30 seconds. Caching behaviour is controlled using `useCache` flag. Signed-off-by: Abhishek Kumar --- ui/src/components/widgets/OsLogo.vue | 78 ++++++++++--------- .../compute/wizard/OsBasedImageRadioGroup.vue | 3 +- 2 files changed, 42 insertions(+), 39 deletions(-) diff --git a/ui/src/components/widgets/OsLogo.vue b/ui/src/components/widgets/OsLogo.vue index 643953012c1..f19aac56a1a 100644 --- a/ui/src/components/widgets/OsLogo.vue +++ b/ui/src/components/widgets/OsLogo.vue @@ -31,6 +31,9 @@ - - diff --git a/ui/src/views/compute/wizard/OsBasedImageRadioGroup.vue b/ui/src/views/compute/wizard/OsBasedImageRadioGroup.vue index 2518ed0c042..45ea347553c 100644 --- a/ui/src/views/compute/wizard/OsBasedImageRadioGroup.vue +++ b/ui/src/views/compute/wizard/OsBasedImageRadioGroup.vue @@ -42,7 +42,8 @@ class="radio-group__os-logo" size="2x" :osId="item.ostypeid" - :os-name="item.osName" /> + :os-name="item.osName" + :use-cache="true" />   {{ item.displaytext }} From 7001d43dbfa7a0e5ca68d527754570596efcb180 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:09:29 +0530 Subject: [PATCH 066/126] Bump org.codehaus.mojo:properties-maven-plugin from 1.0-alpha-2 to 1.2.1 (#12508) --- developer/pom.xml | 2 +- tools/devcloud-kvm/pom.xml | 2 +- tools/devcloud4/pom.xml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/developer/pom.xml b/developer/pom.xml index e2fd782fd25..0a0979ee037 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -66,7 +66,7 @@ org.codehaus.mojo properties-maven-plugin - 1.0-alpha-2 + 1.2.1 initialize diff --git a/tools/devcloud-kvm/pom.xml b/tools/devcloud-kvm/pom.xml index a8cd23db979..35cf828a27b 100644 --- a/tools/devcloud-kvm/pom.xml +++ b/tools/devcloud-kvm/pom.xml @@ -56,7 +56,7 @@ org.codehaus.mojo properties-maven-plugin - 1.0-alpha-2 + 1.2.1 initialize diff --git a/tools/devcloud4/pom.xml b/tools/devcloud4/pom.xml index 1af63b439ad..385b49ad88c 100644 --- a/tools/devcloud4/pom.xml +++ b/tools/devcloud4/pom.xml @@ -56,7 +56,7 @@ org.codehaus.mojo properties-maven-plugin - 1.0-alpha-2 + 1.2.1 
initialize From 434e472ef814897a985f87b92b1d46e9cfc23eeb Mon Sep 17 00:00:00 2001 From: Tonitzpp <134986282+Tonitzpp@users.noreply.github.com> Date: Wed, 28 Jan 2026 06:10:43 -0300 Subject: [PATCH 067/126] Change to display if public IPs are reserved in the tab (#12461) Co-authored-by: toni.zamparetti --- ui/src/views/infra/network/IpRangesTabPublic.vue | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/ui/src/views/infra/network/IpRangesTabPublic.vue b/ui/src/views/infra/network/IpRangesTabPublic.vue index 81f5656799e..dc6ef671e7c 100644 --- a/ui/src/views/infra/network/IpRangesTabPublic.vue +++ b/ui/src/views/infra/network/IpRangesTabPublic.vue @@ -47,6 +47,9 @@ + @@ -128,10 +131,6 @@
{{ $t('label.domain') }}
{{ selectedItem.domain }}
-
-
{{ $t('label.system.vms') }}
-
{{ selectedItem.forsystemvms }}
-
@@ -449,6 +448,10 @@ export default { key: 'endip', title: this.$t('label.endip') }, + { + key: 'systemvms', + title: this.$t('label.reserved.system.ip') + }, { key: 'actions', title: this.$t('label.actions') From 0e7f74839ae264f08382bfcb6692af4f5f073653 Mon Sep 17 00:00:00 2001 From: dahn Date: Wed, 28 Jan 2026 10:48:27 +0100 Subject: [PATCH 068/126] Add configuration for archiving stale issues (#12293) --- .github/workflows/stale.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index e90c75979b6..c957392c504 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -41,3 +41,10 @@ jobs: days-before-pr-close: 240 exempt-issue-labels: 'gsoc,good-first-issue,long-term-plan' exempt-pr-labels: 'status:ready-for-merge,status:needs-testing,status:on-hold' + days-before-close: -1 + - uses: actions/stale@v10 + with: + stale-issue-label: 'archive' + days-before-stale: 240 + exempt-issue-labels: 'gsoc,good-first-issue,long-term-plan' + days-before-close: -1 From 6932cacabc187cf3d76e53c7979ed10067aff2f2 Mon Sep 17 00:00:00 2001 From: Harikrishna Date: Wed, 28 Jan 2026 16:00:30 +0530 Subject: [PATCH 069/126] Allow copy of templates from secondary storages of other zone when adding a new secondary storage (#12296) * Allow copy of templates from secondary storages of other zone when adding a new secondary storage * Add API param and UI changes on add secondary storage page * Make copy template across zones non blocking * Code fixes * unused imports * Add copy template flag in zone wizard and remove NFS checks * Fix UI * Label fixes * code optimizations * code refactoring * missing changes * Combine template copy and download into a single asynchronous operation * unused import and fixed conflicts * unused code * update config message * Fix configuration setting value on add secondary storage page * Removed unused code * Update unit tests --- .../admin/host/AddSecondaryStorageCmd.java | 24 ++- .../service/StorageOrchestrationService.java | 3 +- .../api/storage/TemplateService.java | 4 +- .../com/cloud/storage/StorageManager.java | 5 +- .../orchestration/StorageOrchestrator.java | 45 +++-- .../storage/image/TemplateServiceImpl.java | 163 ++++++++++++++--- .../image/TemplateServiceImplTest.java | 171 +++++++++++++++++- .../cloud/storage/ImageStoreDetailsUtil.java | 11 ++ .../com/cloud/storage/StorageManagerImpl.java | 2 +- .../cloud/template/TemplateManagerImpl.java | 14 +- ui/public/locales/en.json | 4 +- ui/src/views/infra/AddSecondaryStorage.vue | 82 ++++++++- .../infra/zone/ZoneWizardAddResources.vue | 25 ++- .../views/infra/zone/ZoneWizardLaunchZone.vue | 5 + 14 files changed, 490 insertions(+), 68 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java index 9a7eff7e2e5..585fd1b87a8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java @@ -29,6 +29,11 @@ import org.apache.cloudstack.api.response.ZoneResponse; import com.cloud.exception.DiscoveryException; import com.cloud.storage.ImageStore; import com.cloud.user.Account; +import org.apache.commons.collections.MapUtils; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; @APICommand(name = "addSecondaryStorage", description = "Adds 
secondary storage.", responseObject = ImageStoreResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) @@ -44,6 +49,9 @@ public class AddSecondaryStorageCmd extends BaseCmd { @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "The Zone ID for the secondary storage") protected Long zoneId; + @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, description = "Details in key/value pairs using format details[i].keyname=keyvalue. Example: details[0].copytemplatesfromothersecondarystorages=true") + protected Map details; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -56,6 +64,20 @@ public class AddSecondaryStorageCmd extends BaseCmd { return zoneId; } + public Map getDetails() { + Map detailsMap = new HashMap<>(); + if (MapUtils.isNotEmpty(details)) { + Collection props = details.values(); + for (Object prop : props) { + HashMap detail = (HashMap) prop; + for (Map.Entry entry: detail.entrySet()) { + detailsMap.put(entry.getKey(),entry.getValue()); + } + } + } + return detailsMap; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -68,7 +90,7 @@ public class AddSecondaryStorageCmd extends BaseCmd { @Override public void execute(){ try{ - ImageStore result = _storageService.discoverImageStore(null, getUrl(), "NFS", getZoneId(), null); + ImageStore result = _storageService.discoverImageStore(null, getUrl(), "NFS", getZoneId(), getDetails()); ImageStoreResponse storeResponse = null; if (result != null ) { storeResponse = _responseGenerator.createImageStoreResponse(result); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java index 8be2015bfef..4af0c806060 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java @@ -22,7 +22,6 @@ import java.util.concurrent.Future; import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult; import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; @@ -31,5 +30,5 @@ public interface StorageOrchestrationService { MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreId, List templateIdList, List snapshotIdList); - Future orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore); + Future orchestrateTemplateCopyFromSecondaryStores(long templateId, DataStore destStore); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index a8861d5acc6..269eb4f1c21 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ 
b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -80,4 +80,6 @@ public interface TemplateService { List getTemplateDatadisksOnImageStore(TemplateInfo templateInfo, String configurationId); AsyncCallFuture copyTemplateToImageStore(DataObject source, DataStore destStore); -} + + void handleTemplateCopyFromSecondaryStores(long templateId, DataStore destStore); + } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index de0cb34d63e..4ce1f4a9638 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -220,8 +220,9 @@ public interface StorageManager extends StorageService { "storage.pool.host.connect.workers", "1", "Number of worker threads to be used to connect hosts to a primary storage", true); - ConfigKey COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES = new ConfigKey<>(Boolean.class, "copy.public.templates.from.other.storages", - "Storage", "true", "Allow SSVMs to try copying public templates from one secondary storage to another instead of downloading them from the source.", + ConfigKey COPY_TEMPLATES_FROM_OTHER_SECONDARY_STORAGES = new ConfigKey<>(Boolean.class, "copy.templates.from.other.secondary.storages", + "Storage", "true", "When enabled, this feature allows templates to be copied from existing Secondary Storage servers (within the same zone or across zones) " + + "while adding a new Secondary Storage. If the copy operation fails, the system falls back to downloading the template from the source URL.", true, ConfigKey.Scope.Zone, null); /** diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 37a1f8dc196..933b4e0c5ce 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -36,6 +36,9 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.template.TemplateManager; import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -45,6 +48,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageServic import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult; @@ -103,6 +107,15 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra VolumeDataStoreDao volumeDataStoreDao; @Inject DataMigrationUtility migrationHelper; + 
@Inject + TemplateManager templateManager; + @Inject + VMTemplateDao templateDao; + @Inject + TemplateDataFactory templateDataFactory; + @Inject + DataCenterDao dcDao; + ConfigKey ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class, "image.store.imbalance.threshold", @@ -304,8 +317,9 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra } @Override - public Future orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore) { - return submit(destStore.getScope().getScopeId(), new CopyTemplateTask(source, destStore)); + public Future orchestrateTemplateCopyFromSecondaryStores(long srcTemplateId, DataStore destStore) { + Long dstZoneId = destStore.getScope().getScopeId(); + return submit(dstZoneId, new CopyTemplateFromSecondaryStorageTask(srcTemplateId, destStore)); } protected Pair migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List files, MigrationPolicy migrationPolicy, int skipped) { @@ -624,13 +638,13 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra } } - private class CopyTemplateTask implements Callable { - private TemplateInfo sourceTmpl; - private DataStore destStore; - private String logid; + private class CopyTemplateFromSecondaryStorageTask implements Callable { + private final long srcTemplateId; + private final DataStore destStore; + private final String logid; - public CopyTemplateTask(TemplateInfo sourceTmpl, DataStore destStore) { - this.sourceTmpl = sourceTmpl; + CopyTemplateFromSecondaryStorageTask(long srcTemplateId, DataStore destStore) { + this.srcTemplateId = srcTemplateId; this.destStore = destStore; this.logid = ThreadContext.get(LOGCONTEXTID); } @@ -639,17 +653,16 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra public TemplateApiResult call() { ThreadContext.put(LOGCONTEXTID, logid); TemplateApiResult result; - AsyncCallFuture future = templateService.copyTemplateToImageStore(sourceTmpl, destStore); + long destZoneId = destStore.getScope().getScopeId(); + TemplateInfo sourceTmpl = templateDataFactory.getTemplate(srcTemplateId, DataStoreRole.Image); try { - result = future.get(); - } catch (ExecutionException | InterruptedException e) { - logger.warn("Exception while copying template [{}] from image store [{}] to image store [{}]: {}", - sourceTmpl.getUniqueName(), sourceTmpl.getDataStore().getName(), destStore.getName(), e.toString()); + templateService.handleTemplateCopyFromSecondaryStores(srcTemplateId, destStore); result = new TemplateApiResult(sourceTmpl); - result.setResult(e.getMessage()); + } finally { + tryCleaningUpExecutor(destZoneId); + ThreadContext.clearAll(); } - tryCleaningUpExecutor(destStore.getScope().getScopeId()); - ThreadContext.clearAll(); + return result; } } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index bee62955051..5fc9bbac352 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -31,6 +31,8 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; +import com.cloud.exception.StorageUnavailableException; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; import 
org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -67,9 +69,11 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.image.store.TemplateObject; import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.ThreadContext; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -567,10 +571,7 @@ public class TemplateServiceImpl implements TemplateService { } if (availHypers.contains(tmplt.getHypervisorType())) { - boolean copied = isCopyFromOtherStoragesEnabled(zoneId) && tryCopyingTemplateToImageStore(tmplt, store); - if (!copied) { - tryDownloadingTemplateToImageStore(tmplt, store); - } + storageOrchestrator.orchestrateTemplateCopyFromSecondaryStores(tmplt.getId(), store); } else { logger.info("Skip downloading template {} since current data center does not have hypervisor {}", tmplt, tmplt.getHypervisorType()); } @@ -617,6 +618,16 @@ public class TemplateServiceImpl implements TemplateService { } + @Override + public void handleTemplateCopyFromSecondaryStores(long templateId, DataStore destStore) { + VMTemplateVO template = _templateDao.findById(templateId); + long zoneId = destStore.getScope().getScopeId(); + boolean copied = imageStoreDetailsUtil.isCopyTemplatesFromOtherStoragesEnabled(destStore.getId(), zoneId) && tryCopyingTemplateToImageStore(template, destStore); + if (!copied) { + tryDownloadingTemplateToImageStore(template, destStore); + } + } + protected void tryDownloadingTemplateToImageStore(VMTemplateVO tmplt, DataStore destStore) { if (tmplt.getUrl() == null) { logger.info("Not downloading template [{}] to image store [{}], as it has no URL.", tmplt.getUniqueName(), @@ -634,28 +645,134 @@ public class TemplateServiceImpl implements TemplateService { } protected boolean tryCopyingTemplateToImageStore(VMTemplateVO tmplt, DataStore destStore) { - Long zoneId = destStore.getScope().getScopeId(); - List storesInZone = _storeMgr.getImageStoresByZoneIds(zoneId); - for (DataStore sourceStore : storesInZone) { - Map existingTemplatesInSourceStore = listTemplate(sourceStore); - if (existingTemplatesInSourceStore == null || !existingTemplatesInSourceStore.containsKey(tmplt.getUniqueName())) { - logger.debug("Template [{}] does not exist on image store [{}]; searching on another one.", - tmplt.getUniqueName(), sourceStore.getName()); - continue; - } - TemplateObject sourceTmpl = (TemplateObject) _templateFactory.getTemplate(tmplt.getId(), sourceStore); - if (sourceTmpl.getInstallPath() == null) { - logger.warn("Can not copy template [{}] from image store [{}], as it returned a null install path.", tmplt.getUniqueName(), - sourceStore.getName()); - continue; - } - storageOrchestrator.orchestrateTemplateCopyToImageStore(sourceTmpl, destStore); + if (searchAndCopyWithinZone(tmplt, destStore)) { return true; } - logger.debug("Can't copy template [{}] from another image store.", tmplt.getUniqueName()); + + Long destZoneId = destStore.getScope().getScopeId(); + logger.debug("Template [{}] not found in any image store of zone [{}]. 
Checking other zones.", + tmplt.getUniqueName(), destZoneId); + + return searchAndCopyAcrossZones(tmplt, destStore, destZoneId); + } + + private boolean searchAndCopyAcrossZones(VMTemplateVO tmplt, DataStore destStore, Long destZoneId) { + List allZoneIds = _dcDao.listAllIds(); + for (Long otherZoneId : allZoneIds) { + if (otherZoneId.equals(destZoneId)) { + continue; + } + + List storesInOtherZone = _storeMgr.getImageStoresByZoneIds(otherZoneId); + logger.debug("Checking zone [{}] for template [{}]...", otherZoneId, tmplt.getUniqueName()); + + if (CollectionUtils.isEmpty(storesInOtherZone)) { + logger.debug("Zone [{}] has no image stores. Skipping.", otherZoneId); + continue; + } + + TemplateObject sourceTmpl = findUsableTemplate(tmplt, storesInOtherZone); + if (sourceTmpl == null) { + logger.debug("Template [{}] not found with a valid install path in any image store of zone [{}].", + tmplt.getUniqueName(), otherZoneId); + continue; + } + + logger.info("Template [{}] found in zone [{}]. Initiating cross-zone copy to zone [{}].", + tmplt.getUniqueName(), otherZoneId, destZoneId); + + return copyTemplateAcrossZones(destStore, sourceTmpl); + } + + logger.debug("Template [{}] was not found in any zone. Cannot perform zone-to-zone copy.", tmplt.getUniqueName()); return false; } + protected TemplateObject findUsableTemplate(VMTemplateVO tmplt, List imageStores) { + for (DataStore store : imageStores) { + + Map templates = listTemplate(store); + if (templates == null || !templates.containsKey(tmplt.getUniqueName())) { + continue; + } + + TemplateObject tmpl = (TemplateObject) _templateFactory.getTemplate(tmplt.getId(), store); + if (tmpl.getInstallPath() == null) { + logger.debug("Template [{}] found in image store [{}] but install path is null. Skipping.", + tmplt.getUniqueName(), store.getName()); + continue; + } + return tmpl; + } + return null; + } + + private boolean searchAndCopyWithinZone(VMTemplateVO tmplt, DataStore destStore) { + Long destZoneId = destStore.getScope().getScopeId(); + List storesInSameZone = _storeMgr.getImageStoresByZoneIds(destZoneId); + + TemplateObject sourceTmpl = findUsableTemplate(tmplt, storesInSameZone); + if (sourceTmpl == null) { + return false; + } + + TemplateApiResult result; + AsyncCallFuture future = copyTemplateToImageStore(sourceTmpl, destStore); + try { + result = future.get(); + } catch (ExecutionException | InterruptedException e) { + logger.warn("Exception while copying template [{}] from image store [{}] to image store [{}]: {}", + sourceTmpl.getUniqueName(), sourceTmpl.getDataStore().getName(), destStore.getName(), e.toString()); + result = new TemplateApiResult(sourceTmpl); + result.setResult(e.getMessage()); + } + return result.isSuccess(); + } + + private boolean copyTemplateAcrossZones(DataStore destStore, TemplateObject sourceTmpl) { + Long dstZoneId = destStore.getScope().getScopeId(); + DataCenterVO dstZone = _dcDao.findById(dstZoneId); + + if (dstZone == null) { + logger.warn("Destination zone [{}] not found for template [{}].", dstZoneId, sourceTmpl.getUniqueName()); + return false; + } + + TemplateApiResult result; + try { + VMTemplateVO template = _templateDao.findById(sourceTmpl.getId()); + try { + DataStore sourceStore = sourceTmpl.getDataStore(); + long userId = CallContext.current().getCallingUserId(); + boolean success = _tmpltMgr.copy(userId, template, sourceStore, dstZone); + + result = new TemplateApiResult(sourceTmpl); + if (!success) { + result.setResult("Cross-zone template copy failed"); + } + } catch 
(StorageUnavailableException | ResourceAllocationException e) { + logger.error("Exception while copying template [{}] from zone [{}] to zone [{}]", + template, + sourceTmpl.getDataStore().getScope().getScopeId(), + dstZone.getId(), + e); + result = new TemplateApiResult(sourceTmpl); + result.setResult(e.getMessage()); + } finally { + ThreadContext.clearAll(); + } + } catch (Exception e) { + logger.error("Failed to copy template [{}] from zone [{}] to zone [{}].", + sourceTmpl.getUniqueName(), + sourceTmpl.getDataStore().getScope().getScopeId(), + dstZoneId, + e); + return false; + } + + return result.isSuccess(); + } + @Override public AsyncCallFuture copyTemplateToImageStore(DataObject source, DataStore destStore) { TemplateObject sourceTmpl = (TemplateObject) source; @@ -699,10 +816,6 @@ public class TemplateServiceImpl implements TemplateService { return null; } - protected boolean isCopyFromOtherStoragesEnabled(Long zoneId) { - return StorageManager.COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES.valueIn(zoneId); - } - protected void publishTemplateCreation(TemplateInfo tmplt) { VMTemplateVO tmpltVo = _templateDao.findById(tmplt.getId()); diff --git a/engine/storage/image/src/test/java/org/apache/cloudstack/storage/image/TemplateServiceImplTest.java b/engine/storage/image/src/test/java/org/apache/cloudstack/storage/image/TemplateServiceImplTest.java index cb7994915b3..e9eac045869 100644 --- a/engine/storage/image/src/test/java/org/apache/cloudstack/storage/image/TemplateServiceImplTest.java +++ b/engine/storage/image/src/test/java/org/apache/cloudstack/storage/image/TemplateServiceImplTest.java @@ -18,13 +18,20 @@ */ package org.apache.cloudstack.storage.image; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.template.TemplateProp; import com.cloud.template.TemplateManager; +import com.cloud.user.Account; +import com.cloud.user.User; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; -import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.store.TemplateObject; @@ -46,6 +53,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.mockito.Mockito.mock; + @RunWith(MockitoJUnitRunner.class) public class TemplateServiceImplTest { @@ -89,6 +98,12 @@ public class TemplateServiceImplTest { @Mock TemplateManager templateManagerMock; + @Mock + VMTemplateDao templateDao; + + @Mock + DataCenterDao _dcDao; + Map templatesInSourceStore = new HashMap<>(); @Before @@ -101,7 +116,6 @@ public class TemplateServiceImplTest { Mockito.doReturn(List.of(sourceStoreMock, destStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(zoneId); Mockito.doReturn(templatesInSourceStore).when(templateService).listTemplate(sourceStoreMock); Mockito.doReturn(null).when(templateService).listTemplate(destStoreMock); - Mockito.doReturn("install-path").when(templateInfoMock).getInstallPath(); 
Mockito.doReturn(templateInfoMock).when(templateDataFactoryMock).getTemplate(2L, sourceStoreMock); Mockito.doReturn(3L).when(dataStoreMock).getId(); Mockito.doReturn(zoneScopeMock).when(dataStoreMock).getScope(); @@ -166,7 +180,7 @@ public class TemplateServiceImplTest { boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock); Assert.assertFalse(result); - Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any()); + Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyFromSecondaryStores(Mockito.anyLong(), Mockito.any()); } @Test @@ -174,20 +188,161 @@ public class TemplateServiceImplTest { templatesInSourceStore.put(tmpltMock.getUniqueName(), tmpltPropMock); Mockito.doReturn(null).when(templateInfoMock).getInstallPath(); + Scope scopeMock = Mockito.mock(Scope.class); + Mockito.doReturn(scopeMock).when(destStoreMock).getScope(); + Mockito.doReturn(1L).when(scopeMock).getScopeId(); + Mockito.doReturn(List.of(1L)).when(_dcDao).listAllIds(); + boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock); Assert.assertFalse(result); - Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any()); + Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyFromSecondaryStores(Mockito.anyLong(), Mockito.any()); } @Test - public void tryCopyingTemplateToImageStoreTestReturnsTrueWhenTemplateExistsInAnotherStorageAndTaskWasScheduled() { - templatesInSourceStore.put(tmpltMock.getUniqueName(), tmpltPropMock); - Mockito.doReturn(new AsyncCallFuture<>()).when(storageOrchestrator).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any()); + public void tryCopyingTemplateToImageStoreTestReturnsTrueWhenTemplateExistsInAnotherZone() throws StorageUnavailableException, ResourceAllocationException { + Scope scopeMock = Mockito.mock(Scope.class); + Mockito.doReturn(scopeMock).when(destStoreMock).getScope(); + Mockito.doReturn(1L).when(scopeMock).getScopeId(); + Mockito.doReturn(100L).when(tmpltMock).getId(); + Mockito.doReturn("unique-name").when(tmpltMock).getUniqueName(); + Mockito.doReturn(List.of(sourceStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(1L); + Mockito.doReturn(null).when(templateService).listTemplate(sourceStoreMock); + Mockito.doReturn(List.of(1L, 2L)).when(_dcDao).listAllIds(); + + DataStore otherZoneStoreMock = Mockito.mock(DataStore.class); + Mockito.doReturn(List.of(otherZoneStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(2L); + + Map templatesInOtherZone = new HashMap<>(); + templatesInOtherZone.put("unique-name", tmpltPropMock); + Mockito.doReturn(templatesInOtherZone).when(templateService).listTemplate(otherZoneStoreMock); + + TemplateObject sourceTmplMock = Mockito.mock(TemplateObject.class); + Mockito.doReturn(sourceTmplMock).when(templateDataFactoryMock).getTemplate(100L, otherZoneStoreMock); + Mockito.doReturn("/mnt/secondary/template.qcow2").when(sourceTmplMock).getInstallPath(); + + DataCenterVO dstZoneMock = Mockito.mock(DataCenterVO.class); + Mockito.doReturn(dstZoneMock).when(_dcDao).findById(1L); + Mockito.doReturn(true).when(templateManagerMock).copy(Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.any()); boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock); Assert.assertTrue(result); - Mockito.verify(storageOrchestrator).orchestrateTemplateCopyToImageStore(Mockito.any(), 
Mockito.any()); + } + + @Test + public void tryCopyingTemplateToImageStoreTestReturnsFalseWhenDestinationZoneIsMissing() { + Scope scopeMock = Mockito.mock(Scope.class); + Mockito.doReturn(scopeMock).when(destStoreMock).getScope(); + Mockito.doReturn(1L).when(scopeMock).getScopeId(); + Mockito.doReturn(100L).when(tmpltMock).getId(); + Mockito.doReturn("unique-name").when(tmpltMock).getUniqueName(); + Mockito.doReturn(List.of(1L, 2L)).when(_dcDao).listAllIds(); + Mockito.doReturn(List.of()).when(dataStoreManagerMock).getImageStoresByZoneIds(1L); + + DataStore otherZoneStoreMock = Mockito.mock(DataStore.class); + Mockito.doReturn(List.of(otherZoneStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(2L); + + Map templates = new HashMap<>(); + templates.put("unique-name", tmpltPropMock); + Mockito.doReturn(templates).when(templateService).listTemplate(otherZoneStoreMock); + + TemplateObject sourceTmplMock = Mockito.mock(TemplateObject.class); + Mockito.doReturn(sourceTmplMock).when(templateDataFactoryMock).getTemplate(100L, otherZoneStoreMock); + Mockito.doReturn("/mnt/secondary/template.qcow2").when(sourceTmplMock).getInstallPath(); + Mockito.doReturn(null).when(_dcDao).findById(1L); + + boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock); + + Assert.assertFalse(result); + } + + @Test + public void tryCopyingTemplateToImageStoreTestReturnsTrueWhenCrossZoneCopyTaskIsScheduled() throws StorageUnavailableException, ResourceAllocationException { + Scope scopeMock = Mockito.mock(Scope.class); + Mockito.doReturn(scopeMock).when(destStoreMock).getScope(); + Mockito.doReturn(1L).when(scopeMock).getScopeId(); + Mockito.doReturn(100L).when(tmpltMock).getId(); + Mockito.doReturn("unique-name").when(tmpltMock).getUniqueName(); + Mockito.doReturn(List.of(1L, 2L)).when(_dcDao).listAllIds(); + Mockito.doReturn(List.of()).when(dataStoreManagerMock).getImageStoresByZoneIds(1L); + + DataStore otherZoneStoreMock = Mockito.mock(DataStore.class); + Mockito.doReturn(List.of(otherZoneStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(2L); + + Map templates = new HashMap<>(); + templates.put("unique-name", tmpltPropMock); + Mockito.doReturn(templates).when(templateService).listTemplate(otherZoneStoreMock); + + TemplateObject sourceTmplMock = Mockito.mock(TemplateObject.class); + Mockito.doReturn(sourceTmplMock).when(templateDataFactoryMock).getTemplate(100L, otherZoneStoreMock); + Mockito.doReturn("/mnt/secondary/template.qcow2").when(sourceTmplMock).getInstallPath(); + Mockito.doReturn(100L).when(sourceTmplMock).getId(); + + DataStore sourceStoreMock = Mockito.mock(DataStore.class); + Scope sourceScopeMock = Mockito.mock(Scope.class); + Mockito.doReturn(sourceStoreMock).when(sourceTmplMock).getDataStore(); + + DataCenterVO dstZoneMock = Mockito.mock(DataCenterVO.class); + Mockito.doReturn(dstZoneMock).when(_dcDao).findById(1L); + VMTemplateVO templateVoMock = Mockito.mock(VMTemplateVO.class); + Mockito.doReturn(templateVoMock).when(templateDao).findById(100L); + + Mockito.doReturn(true).when(templateManagerMock).copy(Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.any()); + + Account account = mock(Account.class); + User user = mock(User.class); + CallContext callContext = mock(CallContext.class); + + boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock); + + Assert.assertTrue(result); + } + + @Test + public void tryCopyingTemplateToImageStoreTestReturnsFalseWhenTemplateNotFoundInAnyZone() { + Scope scopeMock = 
Mockito.mock(Scope.class); + Mockito.doReturn(scopeMock).when(destStoreMock).getScope(); + Mockito.doReturn(1L).when(scopeMock).getScopeId(); + Mockito.doReturn(List.of(1L, 2L)).when(_dcDao).listAllIds(); + Mockito.doReturn(List.of(sourceStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(Mockito.anyLong()); + Mockito.doReturn(null).when(templateService).listTemplate(Mockito.any()); + + boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock); + + Assert.assertFalse(result); + } + + @Test + public void testFindUsableTemplateReturnsTemplateWithNonNullInstallPath() { + VMTemplateVO template = Mockito.mock(VMTemplateVO.class); + Mockito.when(template.getId()).thenReturn(10L); + Mockito.when(template.getUniqueName()).thenReturn("test-template"); + + DataStore storeWithNullPath = Mockito.mock(DataStore.class); + Mockito.when(storeWithNullPath.getName()).thenReturn("store-null"); + + DataStore storeWithValidPath = Mockito.mock(DataStore.class); + TemplateObject tmplWithNullPath = Mockito.mock(TemplateObject.class); + Mockito.when(tmplWithNullPath.getInstallPath()).thenReturn(null); + + TemplateObject tmplWithValidPath = Mockito.mock(TemplateObject.class); + Mockito.when(tmplWithValidPath.getInstallPath()).thenReturn("/mnt/secondary/template.qcow2"); + + Mockito.doReturn(tmplWithNullPath).when(templateDataFactoryMock).getTemplate(10L, storeWithNullPath); + Mockito.doReturn(tmplWithValidPath).when(templateDataFactoryMock).getTemplate(10L, storeWithValidPath); + + Map templates = new HashMap<>(); + templates.put("test-template", Mockito.mock(TemplateProp.class)); + + Mockito.doReturn(templates).when(templateService).listTemplate(storeWithNullPath); + Mockito.doReturn(templates).when(templateService).listTemplate(storeWithValidPath); + + List imageStores = List.of(storeWithNullPath, storeWithValidPath); + + TemplateObject result = templateService.findUsableTemplate(template, imageStores); + + Assert.assertNotNull(result); + Assert.assertEquals(tmplWithValidPath, result); } } diff --git a/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java b/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java index baf5ef8902d..9f5aa660f4f 100755 --- a/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java @@ -78,4 +78,15 @@ public class ImageStoreDetailsUtil { return getGlobalDefaultNfsVersion(); } + public boolean isCopyTemplatesFromOtherStoragesEnabled(Long storeId, Long zoneId) { + final Map storeDetails = imageStoreDetailsDao.getDetails(storeId); + final String keyWithoutDots = StorageManager.COPY_TEMPLATES_FROM_OTHER_SECONDARY_STORAGES.key() + .replace(".", ""); + + if (storeDetails != null && storeDetails.containsKey(keyWithoutDots)) { + return Boolean.parseBoolean(storeDetails.get(keyWithoutDots)); + } + + return StorageManager.COPY_TEMPLATES_FROM_OTHER_SECONDARY_STORAGES.valueIn(zoneId); + } } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 13b7fbb00c2..d1dca0fa901 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -4206,7 +4206,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C DataStoreDownloadFollowRedirects, AllowVolumeReSizeBeyondAllocation, StoragePoolHostConnectWorkers, - COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES + 
COPY_TEMPLATES_FROM_OTHER_SECONDARY_STORAGES }; } diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 5773410c35a..78265021c0a 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -842,6 +842,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // Copy will just find one eligible image store for the destination zone // and copy template there, not propagate to all image stores // for that zone + + boolean copied = false; + for (DataStore dstSecStore : dstSecStores) { TemplateDataStoreVO dstTmpltStore = _tmplStoreDao.findByStoreTemplate(dstSecStore.getId(), tmpltId); if (dstTmpltStore != null && dstTmpltStore.getDownloadState() == Status.DOWNLOADED) { @@ -856,9 +859,12 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, TemplateApiResult result = future.get(); if (result.isFailed()) { logger.debug("Copy Template failed for image store {}: {}", dstSecStore, result.getResult()); + _tmplStoreDao.removeByTemplateStore(tmpltId, dstSecStore.getId()); continue; // try next image store } + copied = true; + _tmpltDao.addTemplateToZone(template, dstZoneId); if (account.getId() != Account.ACCOUNT_ID_SYSTEM) { @@ -886,12 +892,14 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } } } + + return true; + } catch (Exception ex) { - logger.debug("Failed to copy Template to image store:{} ,will try next one", dstSecStore); + logger.debug("Failed to copy Template to image store:{} ,will try next one", dstSecStore, ex); } } - return true; - + return copied; } @Override diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index b2465fa325f..99873820d53 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -591,6 +591,8 @@ "label.copy.consoleurl": "Copy console URL to clipboard", "label.copyid": "Copy ID", "label.copy.password": "Copy password", +"label.copy.templates.from.other.secondary.storages": "Copy Templates from other storages instead of fetching from URLs", +"label.copy.templates.from.other.secondary.storages.add.zone": "Copy Templates from other storages", "label.core": "Core", "label.core.zone.type": "Core Zone type", "label.counter": "Counter", @@ -3019,7 +3021,7 @@ "message.desc.importmigratefromvmwarewizard": "By selecting an existing or external VMware Datacenter and an instance to import, CloudStack migrates the selected instance from VMware to KVM on a conversion host using virt-v2v and imports it into a KVM Cluster", "message.desc.primary.storage": "Each Cluster must contain one or more primary storage servers. We will add the first one now. Primary storage contains the disk volumes for all the Instances running on hosts in the cluster. Use any standards-compliant protocol that is supported by the underlying hypervisor.", "message.desc.reset.ssh.key.pair": "Please specify a ssh key pair that you would like to add to this Instance.", -"message.desc.secondary.storage": "Each Zone must have at least one NFS or secondary storage server. We will add the first one now. Secondary storage stores Instance Templates, ISO images, and Instance disk volume Snapshots. This server must be available to all hosts in the zone.

<br/><br/>Provide the IP address and exported path.", +"message.desc.secondary.storage": "Each Zone must have at least one NFS or secondary storage server. We will add the first one now. Secondary storage stores Instance Templates, ISO images, and Instance disk volume Snapshots. This server must be available to all hosts in the zone.<br/><br/>Provide the IP address and exported path.<br/><br/>
\"Copy templates from other secondary storages\" switch can be used to automatically copy existing templates from secondary storages in other zones instead of fetching from their URLs.", "message.desc.register.user.data": "Please fill in the following to register new User Data.", "message.desc.registered.user.data": "Registered a User Data.", "message.desc.zone": "A Zone is the largest organizational unit in CloudStack, and it typically corresponds to a single datacenter. Zones provide physical isolation and redundancy. A zone consists of one or more Pods (each of which contains hosts and primary storage servers) and a secondary storage server which is shared by all pods in the zone.", diff --git a/ui/src/views/infra/AddSecondaryStorage.vue b/ui/src/views/infra/AddSecondaryStorage.vue index 746af5b959d..db4893115a6 100644 --- a/ui/src/views/infra/AddSecondaryStorage.vue +++ b/ui/src/views/infra/AddSecondaryStorage.vue @@ -48,6 +48,7 @@
+
+ + + +
{{ $t('label.cancel') }} {{ $t('label.ok') }} @@ -191,7 +204,9 @@ export default { providers: ['NFS', 'SMB/CIFS', 'S3', 'Swift'], zones: [], loading: false, - secondaryStorageNFSStaging: false + secondaryStorageNFSStaging: false, + showCopyTemplatesToggle: false, + copyTemplatesTouched: false } }, created () { @@ -203,7 +218,8 @@ export default { this.formRef = ref() this.form = reactive({ provider: 'NFS', - secondaryStorageHttps: true + secondaryStorageHttps: true, + copyTemplatesFromOtherSecondaryStorages: true }) this.rules = reactive({ zone: [{ required: true, message: this.$t('label.required') }], @@ -225,20 +241,56 @@ export default { }, fetchData () { this.listZones() + this.checkOtherSecondaryStorages() }, closeModal () { this.$emit('close-action') }, + fetchCopyTemplatesConfig () { + if (!this.form.zone) { + return + } + + api('listConfigurations', { + name: 'copy.templates.from.other.secondary.storages', + zoneid: this.form.zone + }).then(json => { + const items = + json?.listconfigurationsresponse?.configuration || [] + + items.forEach(item => { + if (item.name === 'copy.templates.from.other.secondary.storages') { + this.form.copyTemplatesFromOtherSecondaryStorages = + item.value === 'true' + } + }) + }) + }, + onZoneChange (val) { + this.form.zone = val + this.copyTemplatesTouched = false + this.fetchCopyTemplatesConfig() + }, listZones () { api('listZones', { showicon: true }).then(json => { - if (json && json.listzonesresponse && json.listzonesresponse.zone) { - this.zones = json.listzonesresponse.zone - if (this.zones.length > 0) { - this.form.zone = this.zones[0].id || '' - } + this.zones = json.listzonesresponse.zone || [] + + if (this.zones.length > 0) { + this.form.zone = this.zones[0].id + this.fetchCopyTemplatesConfig() } }) }, + checkOtherSecondaryStorages () { + api('listImageStores', { listall: true }).then(json => { + const stores = json?.listimagestoresresponse?.imagestore || [] + + this.showCopyTemplatesToggle = stores.length > 0 + }) + }, + onCopyTemplatesToggleChanged (val) { + this.copyTemplatesTouched = true + }, nfsURL (server, path) { var url if (path.substring(0, 1) !== '/') { @@ -362,6 +414,22 @@ export default { nfsParams.url = nfsUrl } + if ( + this.showCopyTemplatesToggle && + this.copyTemplatesTouched + ) { + const copyTemplatesKey = 'copytemplatesfromothersecondarystorages' + + const detailIdx = Object.keys(data) + .filter(k => k.startsWith('details[')) + .map(k => parseInt(k.match(/details\[(\d+)\]/)[1])) + .reduce((a, b) => Math.max(a, b), -1) + 1 + + data[`details[${detailIdx}].key`] = copyTemplatesKey + data[`details[${detailIdx}].value`] = + values.copyTemplatesFromOtherSecondaryStorages.toString() + } + this.loading = true try { diff --git a/ui/src/views/infra/zone/ZoneWizardAddResources.vue b/ui/src/views/infra/zone/ZoneWizardAddResources.vue index 4bd602f0aca..298cc7fec9d 100644 --- a/ui/src/views/infra/zone/ZoneWizardAddResources.vue +++ b/ui/src/views/infra/zone/ZoneWizardAddResources.vue @@ -840,6 +840,13 @@ export default { display: { secondaryStorageProvider: ['Swift'] } + }, + { + title: 'label.copy.templates.from.other.secondary.storages.add.zone', + key: 'copyTemplatesFromOtherSecondaryStorages', + required: false, + switch: true, + checked: this.copytemplate } ] } @@ -860,7 +867,8 @@ export default { }], storageProviders: [], currentStep: null, - options: ['primaryStorageScope', 'primaryStorageProtocol', 'provider', 'primaryStorageProvider'] + options: ['primaryStorageScope', 'primaryStorageProtocol', 'provider', 
'primaryStorageProvider'], + copytemplate: true } }, created () { @@ -885,6 +893,7 @@ export default { primaryStorageScope: null }) } + this.applyCopyTemplatesOptionFromGlobalSettingDuringSecondaryStorageAddition() } }, watch: { @@ -1108,6 +1117,20 @@ export default { this.storageProviders = storageProviders }) }, + applyCopyTemplatesOptionFromGlobalSettingDuringSecondaryStorageAddition () { + api('listConfigurations', { + name: 'copy.templates.from.other.secondary.storages' + }).then(json => { + const config = json?.listconfigurationsresponse?.configuration?.[0] + + if (!config || config.value === undefined) { + return + } + + const value = String(config.value).toLowerCase() === 'true' + this.copytemplate = value + }) + }, fetchPrimaryStorageProvider () { this.primaryStorageProviders = [] api('listStorageProviders', { type: 'primary' }).then(json => { diff --git a/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue b/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue index a787ad839cd..fbf5e6f5c20 100644 --- a/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue +++ b/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue @@ -1580,6 +1580,11 @@ export default { params.provider = this.prefillContent.secondaryStorageProvider params.zoneid = this.stepData.zoneReturned.id params.url = url + if (this.prefillContent.copyTemplatesFromOtherSecondaryStorages !== undefined) { + params['details[0].key'] = 'copytemplatesfromothersecondarystorages' + params['details[0].value'] = + this.prefillContent.copyTemplatesFromOtherSecondaryStorages + } } else if (this.prefillContent.secondaryStorageProvider === 'SMB') { const nfsServer = this.prefillContent.secondaryStorageServer const path = this.prefillContent.secondaryStoragePath From ff7ec0cd229fc829f3f978e3573366cd20fec5f2 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Wed, 28 Jan 2026 16:15:48 +0530 Subject: [PATCH 070/126] Update alert id for VR public and private interface (#12527) --- .../main/java/org/apache/cloudstack/alert/AlertService.java | 4 ++-- .../src/main/java/com/cloud/alert/AlertManager.java | 1 - .../src/main/resources/META-INF/db/schema-42020to42030.sql | 3 +++ server/src/main/java/com/cloud/event/AlertGenerator.java | 5 +++-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java index 1250284b5c2..4ae6288efce 100644 --- a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java +++ b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java @@ -71,8 +71,8 @@ public interface AlertService { public static final AlertType ALERT_TYPE_HA_ACTION = new AlertType((short)30, "ALERT.HA.ACTION", true); public static final AlertType ALERT_TYPE_CA_CERT = new AlertType((short)31, "ALERT.CA.CERT", true); public static final AlertType ALERT_TYPE_VM_SNAPSHOT = new AlertType((short)32, "ALERT.VM.SNAPSHOT", true); - public static final AlertType ALERT_TYPE_VR_PUBLIC_IFACE_MTU = new AlertType((short)32, "ALERT.VR.PUBLIC.IFACE.MTU", true); - public static final AlertType ALERT_TYPE_VR_PRIVATE_IFACE_MTU = new AlertType((short)32, "ALERT.VR.PRIVATE.IFACE.MTU", true); + public static final AlertType ALERT_TYPE_VR_PUBLIC_IFACE_MTU = new AlertType((short)33, "ALERT.VR.PUBLIC.IFACE.MTU", true); + public static final AlertType ALERT_TYPE_VR_PRIVATE_IFACE_MTU = new AlertType((short)34, "ALERT.VR.PRIVATE.IFACE.MTU", true); public short getType() { return type; diff --git 
a/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java b/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java index 3d4e6579f7c..7fe19c3ba9f 100644 --- a/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java +++ b/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java @@ -54,5 +54,4 @@ public interface AlertManager extends Manager, AlertService { void recalculateCapacity(); void sendAlert(AlertType alertType, long dataCenterId, Long podId, String subject, String body); - } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql b/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql index 598fdb7adc4..567e623564e 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql @@ -20,3 +20,6 @@ --; ALTER TABLE `cloud`.`template_store_ref` MODIFY COLUMN `download_url` varchar(2048); + +UPDATE `cloud`.`alert` SET type = 33 WHERE name = 'ALERT.VR.PUBLIC.IFACE.MTU'; +UPDATE `cloud`.`alert` SET type = 34 WHERE name = 'ALERT.VR.PRIVATE.IFACE.MTU'; diff --git a/server/src/main/java/com/cloud/event/AlertGenerator.java b/server/src/main/java/com/cloud/event/AlertGenerator.java index f1b23e87308..601bf5e831a 100644 --- a/server/src/main/java/com/cloud/event/AlertGenerator.java +++ b/server/src/main/java/com/cloud/event/AlertGenerator.java @@ -67,12 +67,13 @@ public class AlertGenerator { } public static void publishAlertOnEventBus(String alertType, long dataCenterId, Long podId, String subject, String body) { - String configKey = Config.PublishAlertEvent.key(); String value = s_configDao.getValue(configKey); boolean configValue = Boolean.parseBoolean(value); - if(!configValue) + if (!configValue) { return; + } + try { eventDistributor = ComponentContext.getComponent(EventDistributor.class); } catch (NoSuchBeanDefinitionException nbe) { From 83ce0067b82bc39c7c91667c6ac4d2dd144ce450 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Wed, 28 Jan 2026 16:37:57 +0530 Subject: [PATCH 071/126] Update the snapshot physical size for the primary storage resource after snapshot creation and during resource count recalculation (#12481) * Update snapshot size for the primary storage resource after snapshot creation and during resource count recalculation * Update snapshot physical size * review * review --- .../user/snapshot/CreateSnapshotCmd.java | 3 +- .../datastore/db/SnapshotDataStoreDao.java | 14 +++++++++ .../db/SnapshotDataStoreDaoImpl.java | 29 +++++++++++++++++- .../ResourceLimitManagerImpl.java | 10 +++---- .../storage/snapshot/SnapshotManagerImpl.java | 30 +++++++++++-------- .../ResourceLimitManagerImplTest.java | 10 +++++-- 6 files changed, 73 insertions(+), 23 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java index bd541b69183..078d4517f95 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java @@ -244,8 +244,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { } private Snapshot.LocationType getLocationType() { - - if (Snapshot.LocationType.values() == null || Snapshot.LocationType.values().length == 0 || locationType == null) { + if (locationType == null) { return null; } diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java index ef0a5d0ebff..96df4928773 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java @@ -110,4 +110,18 @@ StateDao snapshotIds, Long batchSize); + + /** + * Returns the total physical size, in bytes, of all snapshots stored on primary + * storage for the specified account that have not yet been backed up to + * secondary storage. + * + *

If no such snapshots are found, this method returns {@code 0}.
+ * + * @param accountId the ID of the account whose snapshots on primary storage + * should be considered + * @return the total physical size in bytes of matching snapshots on primary + * storage, or {@code 0} if none are found + */ + long getSnapshotsPhysicalSizeOnPrimaryStorageByAccountId(long accountId); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java index ba76a6b3f41..c68316dd1fe 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java @@ -78,6 +78,15 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase params) throws ConfigurationException { super.configure(name, params); @@ -118,7 +127,6 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase) status -> { - long newResourceCount = 0L; List domainIdList = childDomains.stream().map(DomainVO::getId).collect(Collectors.toList()); domainIdList.add(domainId); List accountIdList = accounts.stream().map(AccountVO::getId).collect(Collectors.toList()); @@ -1189,6 +1188,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim List resourceCounts = _resourceCountDao.lockRows(rowIdsToLock); long oldResourceCount = 0L; + long newResourceCount = 0L; ResourceCountVO domainRC = null; // calculate project count here @@ -1210,7 +1210,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim if (oldResourceCount != newResourceCount) { domainRC.setCount(newResourceCount); _resourceCountDao.update(domainRC.getId(), domainRC); - logger.warn("Discrepency in the resource count has been detected (original count = {} correct count = {}) for Type = {} for Domain ID = {} is fixed during resource count recalculation.", + logger.warn("Discrepancy in the resource count has been detected (original count = {} correct count = {}) for Type = {} for Domain ID = {} is fixed during resource count recalculation.", oldResourceCount, newResourceCount, type, domainId); } return newResourceCount; @@ -1436,16 +1436,17 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim } protected long calculatePrimaryStorageForAccount(long accountId, String tag) { + long snapshotsPhysicalSizeOnPrimaryStorage = _snapshotDataStoreDao.getSnapshotsPhysicalSizeOnPrimaryStorageByAccountId(accountId); if (StringUtils.isEmpty(tag)) { List virtualRouters = _vmDao.findIdsOfAllocatedVirtualRoutersForAccount(accountId); - return _volumeDao.primaryStorageUsedForAccount(accountId, virtualRouters); + return snapshotsPhysicalSizeOnPrimaryStorage + _volumeDao.primaryStorageUsedForAccount(accountId, virtualRouters); } long storage = 0; List volumes = getVolumesWithAccountAndTag(accountId, tag); for (VolumeVO volume : volumes) { storage += volume.getSize() == null ? 
0L : volume.getSize(); } - return storage; + return snapshotsPhysicalSizeOnPrimaryStorage + storage; } @Override @@ -2143,7 +2144,6 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim protected class ResourceCountCheckTask extends ManagedContextRunnable { public ResourceCountCheckTask() { - } @Override diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index e7606572a07..19cde4da0f1 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -276,6 +276,15 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement return !DataCenter.Type.Edge.equals(zone.getType()); } + private ResourceType getStoreResourceType(long dataCenterId, Snapshot.LocationType locationType) { + ResourceType storeResourceType = ResourceType.secondary_storage; + if (!isBackupSnapshotToSecondaryForZone(dataCenterId) || + Snapshot.LocationType.PRIMARY.equals(locationType)) { + storeResourceType = ResourceType.primary_storage; + } + return storeResourceType; + } + @Override public String getConfigComponentName() { return SnapshotManager.class.getSimpleName(); @@ -614,7 +623,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement _snapshotDao.update(snapshot.getId(), snapshot); snapshotInfo = this.snapshotFactory.getSnapshot(snapshotId, store); - Long snapshotOwnerId = vm.getAccountId(); + long snapshotOwnerId = vm.getAccountId(); try { SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.BACKUP); @@ -622,7 +631,6 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement throw new CloudRuntimeException(String.format("Unable to find Snapshot strategy to handle Snapshot [%s]", snapshot)); } snapshotInfo = snapshotStrategy.backupSnapshot(snapshotInfo); - } catch (Exception e) { logger.debug("Failed to backup Snapshot from Instance Snapshot", e); _resourceLimitMgr.decrementResourceCount(snapshotOwnerId, ResourceType.snapshot); @@ -771,12 +779,11 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement _accountMgr.checkAccess(caller, null, true, snapshotCheck); SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshotCheck, zoneId, SnapshotOperation.DELETE); - if (snapshotStrategy == null) { logger.error("Unable to find snapshot strategy to handle snapshot [{}]", snapshotCheck); - return false; } + Pair, List> storeRefAndZones = getStoreRefsAndZonesForSnapshotDelete(snapshotId, zoneId); List snapshotStoreRefs = storeRefAndZones.first(); List zoneIds = storeRefAndZones.second(); @@ -1472,8 +1479,9 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, snapshotStoreRef.getPhysicalSize(), volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); + ResourceType storeResourceType = dataStoreRole == DataStoreRole.Image ? ResourceType.secondary_storage : ResourceType.primary_storage; // Correct the resource count of snapshot in case of delta snapshots. 
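For illustration only (not part of this patch), a minimal standalone sketch of the accounting idea behind this hunk, using the hypothetical names StorageKind and QuotaLedger rather than the CloudStack resource-limit API: the full volume size is reserved when the snapshot is requested, and once the physical (possibly delta) size is known the surplus is released against whichever storage kind actually holds the snapshot.

import java.util.EnumMap;
import java.util.Map;

enum StorageKind { PRIMARY, SECONDARY }

class QuotaLedger {
    private final Map<StorageKind, Long> usedBytes = new EnumMap<>(StorageKind.class);

    void increment(StorageKind kind, long bytes) {
        usedBytes.merge(kind, bytes, Long::sum);
    }

    void decrement(StorageKind kind, long bytes) {
        usedBytes.merge(kind, -bytes, Long::sum);
    }

    // Reserve the worst case up front, then reconcile once the snapshot's
    // physical size is known; delta snapshots are typically far smaller.
    void reconcileAfterBackup(StorageKind kind, long volumeSize, long physicalSize) {
        decrement(kind, volumeSize - physicalSize);
    }

    long used(StorageKind kind) {
        return usedBytes.getOrDefault(kind, 0L);
    }
}

Under these assumptions, a 20 GiB volume whose delta snapshot ends up at 2 GiB is charged 2 GiB overall: increment at request time with the full 20 GiB, then reconcileAfterBackup with the 2 GiB physical size after backup.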
- _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize() - snapshotStoreRef.getPhysicalSize())); + _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), storeResourceType, new Long(volume.getSize() - snapshotStoreRef.getPhysicalSize())); if (!payload.getAsyncBackup() && backupSnapToSecondary) { copyNewSnapshotToZones(snapshotId, snapshot.getDataCenterId(), payload.getZoneIds()); @@ -1485,15 +1493,17 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement if (logger.isDebugEnabled()) { logger.debug("Failed to create snapshot" + cre.getLocalizedMessage()); } + ResourceType storeResourceType = getStoreResourceType(volume.getDataCenterId(), payload.getLocationType()); _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); - _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize())); + _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), storeResourceType, new Long(volume.getSize())); throw cre; } catch (Exception e) { if (logger.isDebugEnabled()) { logger.debug("Failed to create snapshot", e); } + ResourceType storeResourceType = getStoreResourceType(volume.getDataCenterId(), payload.getLocationType()); _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); - _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize())); + _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), storeResourceType, new Long(volume.getSize())); throw new CloudRuntimeException("Failed to create snapshot", e); } return snapshot; @@ -1695,11 +1705,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement Type snapshotType = getSnapshotType(policyId); Account owner = _accountMgr.getAccount(volume.getAccountId()); - ResourceType storeResourceType = ResourceType.secondary_storage; - if (!isBackupSnapshotToSecondaryForZone(volume.getDataCenterId()) || - Snapshot.LocationType.PRIMARY.equals(locationType)) { - storeResourceType = ResourceType.primary_storage; - } + ResourceType storeResourceType = getStoreResourceType(volume.getDataCenterId(), locationType); try { _resourceLimitMgr.checkResourceLimit(owner, ResourceType.snapshot); _resourceLimitMgr.checkResourceLimit(owner, storeResourceType, volume.getSize()); diff --git a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java index 34030626d22..53ccc830dd2 100644 --- a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java +++ b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.TaggedResourceLimitAndCountResponse; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.reservation.dao.ReservationDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -118,6 +119,8 @@ public class ResourceLimitManagerImplTest extends TestCase { VolumeDao volumeDao; @Mock UserVmDao userVmDao; + @Mock + SnapshotDataStoreDao snapshotDataStoreDao; private List hostTags = List.of("htag1", 
"htag2", "htag3"); private List storageTags = List.of("stag1", "stag2"); @@ -840,12 +843,13 @@ public class ResourceLimitManagerImplTest extends TestCase { String tag = null; Mockito.when(vmDao.findIdsOfAllocatedVirtualRoutersForAccount(accountId)) .thenReturn(List.of(1L)); + Mockito.when(snapshotDataStoreDao.getSnapshotsPhysicalSizeOnPrimaryStorageByAccountId(accountId)).thenReturn(100L); Mockito.when(volumeDao.primaryStorageUsedForAccount(Mockito.eq(accountId), Mockito.anyList())).thenReturn(100L); - Assert.assertEquals(100L, resourceLimitManager.calculatePrimaryStorageForAccount(accountId, tag)); + Assert.assertEquals(200L, resourceLimitManager.calculatePrimaryStorageForAccount(accountId, tag)); tag = ""; Mockito.when(volumeDao.primaryStorageUsedForAccount(Mockito.eq(accountId), Mockito.anyList())).thenReturn(200L); - Assert.assertEquals(200L, resourceLimitManager.calculatePrimaryStorageForAccount(accountId, tag)); + Assert.assertEquals(300L, resourceLimitManager.calculatePrimaryStorageForAccount(accountId, tag)); tag = "tag"; VolumeVO vol = Mockito.mock(VolumeVO.class); @@ -853,7 +857,7 @@ public class ResourceLimitManagerImplTest extends TestCase { Mockito.when(vol.getSize()).thenReturn(size); List vols = List.of(vol, vol); Mockito.doReturn(vols).when(resourceLimitManager).getVolumesWithAccountAndTag(accountId, tag); - Assert.assertEquals(vols.size() * size, resourceLimitManager.calculatePrimaryStorageForAccount(accountId, tag)); + Assert.assertEquals((vols.size() * size) + 100L, resourceLimitManager.calculatePrimaryStorageForAccount(accountId, tag)); } @Test From 38e30a116c1c7b7011582e4b6c14f541e5be651b Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 28 Jan 2026 06:12:32 -0500 Subject: [PATCH 072/126] Add support for vTPM for XenServer and XCP-ng 8.3/8.4 (#12263) * XenServer 8.4/XCP-ng 8.3: Support vTPM * fix issue * add log for windows 11 or other such guests OSs that require vtpm * remove secure bootmode requirement * Fix uefi setting on host for xenserver 8.4 --- .../resource/CitrixResourceBase.java | 79 +++++++++++++++++++ .../xenbase/CitrixReadyCommandWrapper.java | 10 ++- .../xenbase/CitrixStartCommandWrapper.java | 8 ++ .../vm/hypervisor/xenserver/xenserver84/vmops | 40 +++++++++- .../com/cloud/api/query/QueryManagerImpl.java | 4 + 5 files changed, 139 insertions(+), 2 deletions(-) diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index 063a5a18ca2..cdb4d7434ae 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -51,6 +51,7 @@ import java.util.concurrent.TimeoutException; import javax.naming.ConfigurationException; import javax.xml.parsers.ParserConfigurationException; +import com.xensource.xenapi.VTPM; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageAnswer; import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageCommand; @@ -5826,4 +5827,82 @@ public abstract class CitrixResourceBase extends ServerResourceBase implements S public void destroyVm(VM vm, Connection connection) throws XenAPIException, XmlRpcException { destroyVm(vm, connection, false); } + + /** + * Configure vTPM (Virtual Trusted Platform Module) support for a VM. 
+ * vTPM provides a virtual TPM 2.0 device for VMs, enabling features like Secure Boot and disk encryption. + * + * Requirements: + * - XenServer/XCP-ng 8.3 (and above) + * - UEFI Secure Boot enabled + * - VM in halted state + * + * @param conn XenServer connection + * @param vm The VM to configure + * @param vmSpec VM specification containing vTPM settings + */ + public void configureVTPM(Connection conn, VM vm, VirtualMachineTO vmSpec) throws XenAPIException, XmlRpcException { + if (vmSpec == null || vmSpec.getDetails() == null) { + return; + } + + String vtpmEnabled = vmSpec.getDetails().getOrDefault(VmDetailConstants.VIRTUAL_TPM_ENABLED, null); + + final Map platform = vm.getPlatform(conn); + if (platform != null) { + final String guestRequiresVtpm = platform.get("vtpm"); + if (guestRequiresVtpm != null && Boolean.parseBoolean(guestRequiresVtpm) && !Boolean.parseBoolean(vtpmEnabled)) { + logger.warn("Guest OS requires vTPM by default, even if VM details doesn't have the setting: {}", vmSpec.getName()); + return; + } + } + + if (!Boolean.parseBoolean(vtpmEnabled)) { + return; + } + + String bootMode = StringUtils.defaultIfEmpty(vmSpec.getDetails().get(ApiConstants.BootType.UEFI.toString()), null); + String bootType = (bootMode == null) ? ApiConstants.BootType.BIOS.toString() : ApiConstants.BootType.UEFI.toString(); + + if (!ApiConstants.BootType.UEFI.toString().equals(bootType)) { + logger.warn("vTPM requires UEFI boot mode. Skipping vTPM configuration for VM: {}", vmSpec.getName()); + return; + } + + try { + Set existingVtpms = vm.getVTPMs(conn); + if (!existingVtpms.isEmpty()) { + logger.debug("vTPM already exists for VM: {}", vmSpec.getName()); + return; + } + + // Creates vTPM using: xe vtpm-create vm-uuid= + String vmUuid = vm.getUuid(conn); + String result = callHostPlugin(conn, "vmops", "create_vtpm", "vm_uuid", vmUuid); + + if (result == null || result.isEmpty() || result.startsWith("ERROR:") || result.startsWith("EXCEPTION:")) { + throw new CloudRuntimeException("Failed to create vTPM, result: " + result); + } + + logger.info("Successfully created vTPM {} for VM: {}", result.trim(), vmSpec.getName()); + } catch (Exception e) { + logger.warn("Failed to configure vTPM for VM: {}, continuing without vTPM", vmSpec.getName(), e); + } + } + + public boolean isVTPMSupported(Connection conn, Host host) { + try { + Host.Record hostRecord = host.getRecord(conn); + String productVersion = hostRecord.softwareVersion.get("product_version"); + if (productVersion == null) { + return false; + } + ComparableVersion currentVersion = new ComparableVersion(productVersion); + ComparableVersion minVersion = new ComparableVersion("8.2.0"); + return currentVersion.compareTo(minVersion) >= 0; + } catch (Exception e) { + logger.warn("Failed to check vTPM support on host", e); + return false; + } + } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixReadyCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixReadyCommandWrapper.java index c5605e85f94..4f55ae82337 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixReadyCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixReadyCommandWrapper.java @@ -60,12 +60,20 @@ public final class CitrixReadyCommandWrapper extends CommandWrapper vms = host.getResidentVMs(conn); 
citrixResourceBase.destroyPatchVbd(conn, vms); + } catch (final Exception e) { + logger.warn("Unable to destroy CD-ROM device for system VMs", e); + } + + try { + final Host host = Host.getByUuid(conn, citrixResourceBase.getHost().getUuid()); final Host.Record hr = host.getRecord(conn); if (isUefiSupported(CitrixHelper.getProductVersion(hr))) { hostDetails.put(com.cloud.host.Host.HOST_UEFI_ENABLE, Boolean.TRUE.toString()); } - } catch (final Exception e) { + } catch (Exception e) { + logger.warn("Unable to get UEFI support info", e); } + try { final boolean result = citrixResourceBase.cleanupHaltedVms(conn); if (!result) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java index d448638f028..5c2355a4cec 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java @@ -97,6 +97,14 @@ public final class CitrixStartCommandWrapper extends CommandWrapper(ConfigKey.CATEGORY_ADVANCED, String.class, "user.password.reset.mail.template", "Hello {{username}}!\n" + "You have requested to reset your password. Please click the following link to reset your password:\n" + - "{{{domainUrl}}}{{{resetLink}}}\n" + + "{{{resetLink}}}\n" + "If you did not request a password reset, please ignore this email.\n" + "\n" + "Regards,\n" + @@ -179,10 +181,26 @@ public class UserPasswordResetManagerImpl extends ManagerBase implements UserPas final String email = userAccount.getEmail(); final String username = userAccount.getUsername(); final String subject = "Password Reset Request"; - final String domainUrl = UserPasswordResetDomainURL.value(); + String domainUrl = UserPasswordResetDomainURL.value(); + if (StringUtils.isBlank(domainUrl)) { + String mgmtServerAddr = ManagementServerAddresses.value().split(",")[0]; + if (ServerProperties.isHttpsEnabled()) { + domainUrl = "https://" + mgmtServerAddr + ":" + ServerProperties.getHttpsPort(); + } else { + domainUrl = "http://" + mgmtServerAddr + ":" + ServerProperties.getHttpPort(); + } + } else if (!domainUrl.startsWith("http://") && !domainUrl.startsWith("https://")) { + if (ServerProperties.isHttpsEnabled()) { + domainUrl = "https://" + domainUrl; + } else { + domainUrl = "http://" + domainUrl; + } + } - String resetLink = String.format("/client/#/user/resetPassword?username=%s&token=%s", - username, resetToken); + domainUrl = domainUrl.replaceAll("/+$", ""); + + String resetLink = String.format("%s/client/#/user/resetPassword?username=%s&token=%s", + domainUrl, username, resetToken); String content = getMessageBody(userAccount, resetToken, resetLink); SMTPMailProperties mailProperties = new SMTPMailProperties(); diff --git a/utils/src/main/java/com/cloud/utils/server/ServerProperties.java b/utils/src/main/java/com/cloud/utils/server/ServerProperties.java index 36d8614e68f..9e81fff90f0 100644 --- a/utils/src/main/java/com/cloud/utils/server/ServerProperties.java +++ b/utils/src/main/java/com/cloud/utils/server/ServerProperties.java @@ -17,10 +17,12 @@ package com.cloud.utils.server; import com.cloud.utils.crypt.EncryptionSecretKeyChecker; +import com.cloud.utils.StringUtils; import org.apache.commons.io.IOUtils; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.util.Properties; @@ -28,9 +30,20 @@ import java.util.Properties; public class ServerProperties { protected Logger logger = LogManager.getLogger(getClass()); + public static final String HTTP_ENABLE = "http.enable"; + public static final String HTTP_PORT = "http.port"; + public static final String HTTPS_ENABLE = "https.enable"; + public static final String HTTPS_PORT = "https.port"; + public static final String KEYSTORE_FILE = "https.keystore"; + public static final String PASSWORD_ENCRYPTION_TYPE = "password.encryption.type"; + private static Properties properties = new Properties(); private static boolean loaded = false; - public static final String passwordEncryptionType = "password.encryption.type"; + + private static int httpPort = 8080; + + private static boolean httpsEnable = false; + private static int httpsPort = 8443; public synchronized static Properties getServerProperties(InputStream inputStream) { if (!loaded) { @@ -39,7 +52,7 @@ public class ServerProperties { serverProps.load(inputStream); EncryptionSecretKeyChecker checker = new EncryptionSecretKeyChecker(); - checker.check(serverProps, passwordEncryptionType); + checker.check(serverProps, PASSWORD_ENCRYPTION_TYPE); if (EncryptionSecretKeyChecker.useEncryption()) { EncryptionSecretKeyChecker.decryptAnyProperties(serverProps); @@ -50,10 +63,29 @@ public class ServerProperties { IOUtils.closeQuietly(inputStream); } + httpPort = Integer.parseInt(serverProps.getProperty(ServerProperties.HTTP_PORT, "8080")); + + boolean httpsEnabled = Boolean.parseBoolean(serverProps.getProperty(ServerProperties.HTTPS_ENABLE, "false")); + String keystoreFile = serverProps.getProperty(KEYSTORE_FILE); + httpsEnable = httpsEnabled && StringUtils.isNotEmpty(keystoreFile) && new File(keystoreFile).exists(); + httpsPort = Integer.parseInt(serverProps.getProperty(ServerProperties.HTTPS_PORT, "8443")); + properties = serverProps; loaded = true; } return properties; } + + public static int getHttpPort() { + return httpPort; + } + + public static boolean isHttpsEnabled() { + return httpsEnable; + } + + public static int getHttpsPort() { + return httpsPort; + } } From 6a04e14f8767a90b5d709e3105f7f47500b4d21c Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Wed, 28 Jan 2026 13:09:10 +0100 Subject: [PATCH 076/126] VR: fix dns list in redundant VPC VRs (#12161) --- systemvm/debian/opt/cloud/bin/cs/CsDhcp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py index e15714af212..a2309067289 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py @@ -110,7 +110,7 @@ class CsDhcp(CsDataBag): if gn.get_dns() and device: sline = "dhcp-option=tag:interface-%s-%s,6" % (device, idx) dns_list = [x for x in gn.get_dns() if x] - if (self.config.is_vpc() or self.config.is_router()) and ('is_vr_guest_gateway' in gn.data and gn.data['is_vr_guest_gateway']): + if self.config.is_vpc() and not gn.is_vr_guest_gateway(): if gateway in dns_list: dns_list.remove(gateway) if gn.data['router_guest_ip'] != ip: From 37507e87990bdf2fe36767da996c079859c9ac0f Mon Sep 17 00:00:00 2001 From: dahn Date: Wed, 28 Jan 2026 13:11:24 +0100 Subject: [PATCH 077/126] address warnings in capacity manager (#11971) Co-authored-by: Daan Hoogland --- .../cloud/capacity/CapacityManagerImpl.java 
| 155 +++++------------- 1 file changed, 38 insertions(+), 117 deletions(-) diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index 2de9abc827e..2940f900b08 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -50,7 +50,6 @@ import com.cloud.agent.api.AgentControlCommand; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.capacity.dao.CapacityDao; import com.cloud.configuration.Config; import com.cloud.dc.ClusterDetailsDao; @@ -82,7 +81,6 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; -import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.TransactionStatus; @@ -167,11 +165,6 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return true; } - @Override - public boolean stop() { - return true; - } - @DB @Override public boolean releaseVmCapacity(VirtualMachine vm, final boolean moveFromReserved, final boolean moveToReservered, final Long hostId) { @@ -395,8 +388,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long cluster_id = host.getClusterId(); ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, VmDetailConstants.CPU_OVER_COMMIT_RATIO); ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); - Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); boolean hostHasCpuCapability, hostHasCapacity = false; hostHasCpuCapability = checkIfHostHasCpuCapability(host, cpucore, cpuspeed); @@ -424,14 +417,13 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (e instanceof CloudRuntimeException) { throw e; } - return; } } @Override public boolean checkIfHostHasCpuCapability(Host host, Integer cpuNum, Integer cpuSpeed) { // Check host can support the Cpu Number and Speed. 
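As a side note (an assumption-laden sketch, not the CloudStack Host interface), the capability check this hunk tidies up can be read as: a host qualifies only if it offers at least the requested core count and at least the requested per-core speed. HostSpec below is a made-up record used purely for illustration.

record HostSpec(int cpus, long speedMhz) {}

final class CpuCapability {
    // A host qualifies when it offers at least the requested number of cores
    // and at least the requested per-core speed in MHz.
    static boolean hasCpuCapability(HostSpec host, int requestedCores, long requestedSpeedMhz) {
        boolean coreCountOk = host.cpus() >= requestedCores;
        boolean speedOk = host.speedMhz() >= requestedSpeedMhz;
        return coreCountOk && speedOk;
    }
}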
- boolean isCpuNumGood = host.getCpus().intValue() >= cpuNum; + boolean isCpuNumGood = host.getCpus() >= cpuNum; boolean isCpuSpeedGood = host.getSpeed().intValue() >= cpuSpeed; boolean hasCpuCapability = isCpuNumGood && isCpuSpeedGood; @@ -482,13 +474,10 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, String failureReason = ""; if (checkFromReservedCapacity) { - long freeCpu = reservedCpu; - long freeMem = reservedMem; - if (logger.isDebugEnabled()) { logger.debug("We need to allocate to the last host again, so checking if there is enough reserved capacity"); - logger.debug("Reserved CPU: " + freeCpu + " , Requested CPU: " + cpu); - logger.debug("Reserved RAM: " + toHumanReadableSize(freeMem) + " , Requested RAM: " + toHumanReadableSize(ram)); + logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu); + logger.debug("Reserved RAM: " + toHumanReadableSize(reservedMem) + " , Requested RAM: " + toHumanReadableSize(ram)); } /* alloc from reserved */ if (reservedCpu >= cpu) { @@ -586,7 +575,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @Override public long getAllocatedPoolCapacity(StoragePoolVO pool, VMTemplateVO templateForVmCreation) { - long totalAllocatedSize = 0; + long totalAllocatedSize; // if the storage pool is managed, the used bytes can be larger than the sum of the sizes of all of the non-destroyed volumes // in this case, call getUsedBytes(StoragePoolVO) @@ -700,11 +689,11 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, Pair clusterValues = clusterValuesCache.get(host.getClusterId()); - Float clusterCpuOvercommitRatio = Float.parseFloat(clusterValues.first()); - Float clusterRamOvercommitRatio = Float.parseFloat(clusterValues.second()); + float clusterCpuOvercommitRatio = Float.parseFloat(clusterValues.first()); + float clusterRamOvercommitRatio = Float.parseFloat(clusterValues.second()); for (VMInstanceVO vm : vms) { - Float cpuOvercommitRatio = 1.0f; - Float ramOvercommitRatio = 1.0f; + float cpuOvercommitRatio; + float ramOvercommitRatio; Map vmDetails = getVmDetailsForCapacityCalculation(vm.getId()); String vmDetailCpu = vmDetails.get(VmDetailConstants.CPU_OVER_COMMIT_RATIO); String vmDetailRam = vmDetails.get(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); @@ -717,21 +706,22 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } if (so.isDynamic()) { usedMemory += - ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * - clusterRamOvercommitRatio; + (long) (((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) + / ramOvercommitRatio) * clusterRamOvercommitRatio); if(vmDetails.containsKey(UsageEventVO.DynamicParameters.cpuSpeed.name())) { usedCpu += - ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * - clusterCpuOvercommitRatio; + (long) ((((long) Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) + * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) + / cpuOvercommitRatio) * clusterCpuOvercommitRatio); } else { usedCpu += - ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * so.getSpeed()) / cpuOvercommitRatio) * - clusterCpuOvercommitRatio; + (long) ((((long) 
Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * so.getSpeed()) / cpuOvercommitRatio) * + clusterCpuOvercommitRatio); } usedCpuCore += Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())); } else { - usedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio; - usedCpu += ((so.getCpu() * so.getSpeed()) / cpuOvercommitRatio) * clusterCpuOvercommitRatio; + usedMemory += (long) (((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio); + usedCpu += (long) ((((long) so.getCpu() * so.getSpeed()) / cpuOvercommitRatio) * clusterCpuOvercommitRatio); usedCpuCore += so.getCpu(); } } @@ -740,8 +730,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, logger.debug("Found {} VM, not running on {}", vmsByLastHostId.size(), host); for (VMInstanceVO vm : vmsByLastHostId) { - Float cpuOvercommitRatio = 1.0f; - Float ramOvercommitRatio = 1.0f; + float cpuOvercommitRatio = 1.0f; + float ramOvercommitRatio = 1.0f; long lastModificationTime = Optional.ofNullable(vm.getUpdateTime()).orElse(vm.getCreated()).getTime(); long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - lastModificationTime) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { @@ -761,28 +751,28 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } if (so.isDynamic()) { reservedMemory += - ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * - clusterRamOvercommitRatio; + (long) (((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.memory.name())) * 1024L * 1024L) / ramOvercommitRatio) * + clusterRamOvercommitRatio); if(vmDetails.containsKey(UsageEventVO.DynamicParameters.cpuSpeed.name())) { reservedCpu += - ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * - clusterCpuOvercommitRatio; + (long) (((Long.parseLong(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuSpeed.name()))) / cpuOvercommitRatio) * + clusterCpuOvercommitRatio); } else { reservedCpu += - ((Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * so.getSpeed()) / cpuOvercommitRatio) * - clusterCpuOvercommitRatio; + (long) (((Long.parseLong(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())) * so.getSpeed()) / cpuOvercommitRatio) * + clusterCpuOvercommitRatio); } reservedCpuCore += Integer.parseInt(vmDetails.get(UsageEventVO.DynamicParameters.cpuNumber.name())); } else { - reservedMemory += ((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio; - reservedCpu += (so.getCpu() * so.getSpeed() / cpuOvercommitRatio) * clusterCpuOvercommitRatio; + reservedMemory += (long) (((so.getRamSize() * 1024L * 1024L) / ramOvercommitRatio) * clusterRamOvercommitRatio); + reservedCpu += (long) (((long) so.getCpu() * so.getSpeed() / cpuOvercommitRatio) * clusterCpuOvercommitRatio); reservedCpuCore += so.getCpu(); } } else { // signal if not done already, that the VM has been stopped for skip.counting.hours, // hence capacity will not be reserved anymore. 
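For context, a hedged sketch of the publish-once pattern used around the reserved-capacity-freed signal below; ReservedCapacityNotifier and notifyIfFirstExpiry are hypothetical names, not the CloudStack message bus API.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class ReservedCapacityNotifier {
    private final Set<Long> alreadyNotified = ConcurrentHashMap.newKeySet();

    // Publish the "reserved capacity freed" signal at most once per VM,
    // mirroring the flag-guarded publish in the surrounding hunk.
    void notifyIfFirstExpiry(long vmId, Runnable publish) {
        if (alreadyNotified.add(vmId)) {
            publish.run();
        }
    }
}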
VMInstanceDetailVO messageSentFlag = _vmInstanceDetailsDao.findDetail(vm.getId(), VmDetailConstants.MESSAGE_RESERVED_CAPACITY_FREED_FLAG); - if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) { + if (messageSentFlag == null || !Boolean.parseBoolean(messageSentFlag.getValue())) { _messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm); if (vm.getType() == VirtualMachine.Type.User) { @@ -859,7 +849,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (host.getTotalMemory() != null) { memCap.setTotalCapacity(host.getTotalMemory()); } - long hostTotalCpu = host.getCpus().longValue() * host.getSpeed().longValue(); + long hostTotalCpu = host.getCpus().longValue() * host.getSpeed(); if (cpuCap.getTotalCapacity() != hostTotalCpu) { logger.debug("Calibrate total cpu for host: {} old total CPU:{} new total CPU:{}", host, cpuCap.getTotalCapacity(), hostTotalCpu); @@ -938,7 +928,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, capacity = new CapacityVO(host.getId(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedCpuFinal, host.getCpus().longValue() * - host.getSpeed().longValue(), Capacity.CAPACITY_TYPE_CPU); + host.getSpeed(), Capacity.CAPACITY_TYPE_CPU); capacity.setReservedCapacity(reservedCpuFinal); capacity.setCapacityState(capacityState); _capacityDao.persist(capacity); @@ -1029,78 +1019,10 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return true; } - // TODO: Get rid of this case once we've determined that the capacity listeners above have all the changes - // create capacity entries if none exist for this server - private void createCapacityEntry(StartupCommand startup, HostVO server) { - SearchCriteria capacitySC = _capacityDao.createSearchCriteria(); - capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId()); - capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId()); - capacitySC.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); - - if (startup instanceof StartupRoutingCommand) { - SearchCriteria capacityCPU = _capacityDao.createSearchCriteria(); - capacityCPU.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId()); - capacityCPU.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId()); - capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); - capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_CPU); - List capacityVOCpus = _capacityDao.search(capacitySC, null); - Float cpuovercommitratio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO).getValue()); - Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO).getValue()); - - if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) { - CapacityVO CapacityVOCpu = capacityVOCpus.get(0); - long newTotalCpu = (long)(server.getCpus().longValue() * server.getSpeed().longValue() * cpuovercommitratio); - if ((CapacityVOCpu.getTotalCapacity() <= newTotalCpu) || ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity()) <= newTotalCpu)) { - CapacityVOCpu.setTotalCapacity(newTotalCpu); - } else if ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity() > newTotalCpu) && (CapacityVOCpu.getUsedCapacity() < newTotalCpu)) { - CapacityVOCpu.setReservedCapacity(0); - 
CapacityVOCpu.setTotalCapacity(newTotalCpu); - } else { - logger.debug("What? new cpu is :" + newTotalCpu + ", old one is " + CapacityVOCpu.getUsedCapacity() + "," + CapacityVOCpu.getReservedCapacity() + - "," + CapacityVOCpu.getTotalCapacity()); - } - _capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu); - } else { - CapacityVO capacity = - new CapacityVO(server.getId(), server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L, server.getCpus().longValue() * - server.getSpeed().longValue(), Capacity.CAPACITY_TYPE_CPU); - _capacityDao.persist(capacity); - } - - SearchCriteria capacityMem = _capacityDao.createSearchCriteria(); - capacityMem.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, server.getId()); - capacityMem.addAnd("dataCenterId", SearchCriteria.Op.EQ, server.getDataCenterId()); - capacityMem.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); - capacityMem.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_MEMORY); - List capacityVOMems = _capacityDao.search(capacityMem, null); - - if (capacityVOMems != null && !capacityVOMems.isEmpty()) { - CapacityVO CapacityVOMem = capacityVOMems.get(0); - long newTotalMem = (long)((server.getTotalMemory()) * memoryOvercommitRatio); - if (CapacityVOMem.getTotalCapacity() <= newTotalMem || (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity() <= newTotalMem)) { - CapacityVOMem.setTotalCapacity(newTotalMem); - } else if (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity() > newTotalMem && CapacityVOMem.getUsedCapacity() < newTotalMem) { - CapacityVOMem.setReservedCapacity(0); - CapacityVOMem.setTotalCapacity(newTotalMem); - } else { - logger.debug("What? new mem is :" + newTotalMem + ", old one is " + CapacityVOMem.getUsedCapacity() + "," + CapacityVOMem.getReservedCapacity() + - "," + CapacityVOMem.getTotalCapacity()); - } - _capacityDao.update(CapacityVOMem.getId(), CapacityVOMem); - } else { - CapacityVO capacity = - new CapacityVO(server.getId(), server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L, server.getTotalMemory(), - Capacity.CAPACITY_TYPE_MEMORY); - _capacityDao.persist(capacity); - } - } - - } - @Override public float getClusterOverProvisioningFactor(Long clusterId, short capacityType) { - String capacityOverProvisioningName = ""; + String capacityOverProvisioningName; if (capacityType == Capacity.CAPACITY_TYPE_CPU) { capacityOverProvisioningName = VmDetailConstants.CPU_OVER_COMMIT_RATIO; } else if (capacityType == Capacity.CAPACITY_TYPE_MEMORY) { @@ -1110,15 +1032,14 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } ClusterDetailsVO clusterDetailCpu = _clusterDetailsDao.findDetail(clusterId, capacityOverProvisioningName); - Float clusterOverProvisioningRatio = Float.parseFloat(clusterDetailCpu.getValue()); - return clusterOverProvisioningRatio; + return Float.parseFloat(clusterDetailCpu.getValue()); } @Override public boolean checkIfClusterCrossesThreshold(Long clusterId, Integer cpuRequested, long ramRequested) { - Float clusterCpuOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_CPU); - Float clusterMemoryOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_MEMORY); + float clusterCpuOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_CPU); + float clusterMemoryOverProvisioning = getClusterOverProvisioningFactor(clusterId, Capacity.CAPACITY_TYPE_MEMORY); Float clusterCpuCapacityDisableThreshold = 
DeploymentClusterPlanner.ClusterCPUCapacityDisableThreshold.valueIn(clusterId); Float clusterMemoryCapacityDisableThreshold = DeploymentClusterPlanner.ClusterMemoryCapacityDisableThreshold.valueIn(clusterId); @@ -1148,8 +1069,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, int cpu_requested = offering.getCpu() * offering.getSpeed(); long ram_requested = offering.getRamSize() * 1024L * 1024L; Pair clusterDetails = getClusterValues(host.getClusterId()); - Float cpuOvercommitRatio = Float.parseFloat(clusterDetails.first()); - Float memoryOvercommitRatio = Float.parseFloat(clusterDetails.second()); + float cpuOvercommitRatio = Float.parseFloat(clusterDetails.first()); + float memoryOvercommitRatio = Float.parseFloat(clusterDetails.second()); boolean hostHasCpuCapability = checkIfHostHasCpuCapability(host, offering.getCpu(), offering.getSpeed()); boolean hostHasCapacity = checkIfHostHasCapacity(host, cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, From 69c69dc537c025eca86dc0e48ae9dc7761887ef8 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 28 Jan 2026 17:53:39 +0530 Subject: [PATCH 078/126] server,engine-schema: make config - use.https.to.upload zone scoped (#11539) --- .../java/com/cloud/storage/VolumeApiService.java | 4 ++-- .../command/TemplateOrVolumePostUploadCommand.java | 14 +++++++++----- .../resources/META-INF/db/schema-42100to42200.sql | 5 +++++ .../com/cloud/storage/VolumeApiServiceImpl.java | 11 +++++++---- .../com/cloud/template/TemplateAdapterBase.java | 7 ++++--- .../com/cloud/template/TemplateManagerImpl.java | 2 +- .../SecondaryStorageManagerImpl.java | 2 +- 7 files changed, 29 insertions(+), 16 deletions(-) diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java index 19c2ebe455a..1a9bcc6ee98 100644 --- a/api/src/main/java/com/cloud/storage/VolumeApiService.java +++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java @@ -56,9 +56,9 @@ public interface VolumeApiService { Boolean.class, "use.https.to.upload", "true", - "Determines the protocol (HTTPS or HTTP) ACS will use to generate links to upload ISOs, volumes, and templates. When set as 'true', ACS will use protocol HTTPS, otherwise, it will use protocol HTTP. Default value is 'true'.", + "Controls whether upload links for ISOs, volumes, and templates use HTTPS (true, default) or HTTP (false). 
After changing this setting, the Secondary Storage VM (SSVM) must be recreated", true, - ConfigKey.Scope.StoragePool); + ConfigKey.Scope.Zone); /** * Creates the database object for a volume based on the given criteria diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/TemplateOrVolumePostUploadCommand.java b/core/src/main/java/org/apache/cloudstack/storage/command/TemplateOrVolumePostUploadCommand.java index 3ac83031eaf..253a2607a72 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/command/TemplateOrVolumePostUploadCommand.java +++ b/core/src/main/java/org/apache/cloudstack/storage/command/TemplateOrVolumePostUploadCommand.java @@ -57,8 +57,10 @@ public class TemplateOrVolumePostUploadCommand { private String nfsVersion; - public TemplateOrVolumePostUploadCommand(long entityId, String entityUUID, String absolutePath, String checksum, String type, String name, String imageFormat, String dataTo, - String dataToRole) { + private long zoneId; + + public TemplateOrVolumePostUploadCommand(long entityId, String entityUUID, String absolutePath, String checksum, + String type, String name, String imageFormat, String dataTo, String dataToRole, long zoneId) { this.entityId = entityId; this.entityUUID = entityUUID; this.absolutePath = absolutePath; @@ -68,9 +70,7 @@ public class TemplateOrVolumePostUploadCommand { this.imageFormat = imageFormat; this.dataTo = dataTo; this.dataToRole = dataToRole; - } - - public TemplateOrVolumePostUploadCommand() { + this.zoneId = zoneId; } public String getRemoteEndPoint() { @@ -216,4 +216,8 @@ public class TemplateOrVolumePostUploadCommand { public long getProcessTimeout() { return processTimeout; } + + public long getZoneId() { + return zoneId; + } } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql index b523016aa3d..858c46a7c1e 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql @@ -87,3 +87,8 @@ CALL `cloud`.`INSERT_EXTENSION_DETAIL_IF_NOT_EXISTS`('MaaS', 'orchestratorrequir CALL `cloud`.`IDEMPOTENT_DROP_UNIQUE_KEY`('counter', 'uc_counter__provider__source__value'); CALL `cloud`.`IDEMPOTENT_ADD_UNIQUE_KEY`('cloud.counter', 'uc_counter__provider__source__value__removed', '(provider, source, value, removed)'); + +-- Change scope for configuration - 'use.https.to.upload from' from StoragePool to Zone +UPDATE `cloud`.`configuration` SET `scope` = 2 WHERE `name` = 'use.https.to.upload'; +-- Delete the configuration for 'use.https.to.upload' from StoragePool +DELETE FROM `cloud`.`storage_pool_details` WHERE `name` = 'use.https.to.upload'; diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index ce7fab9272e..31bf80e6459 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -504,9 +504,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic GetUploadParamsResponse response = new GetUploadParamsResponse(); String ssvmUrlDomain = _configDao.getValue(Config.SecStorageSecureCopyCert.key()); - String protocol = UseHttpsToUpload.value() ? "https" : "http"; + String protocol = UseHttpsToUpload.valueIn(zoneId) ? 
"https" : "http"; - String url = ImageStoreUtil.generatePostUploadUrl(ssvmUrlDomain, ep.getPublicAddr(), vol.getUuid(), protocol); + String url = ImageStoreUtil.generatePostUploadUrl(ssvmUrlDomain, ep.getPublicAddr(), vol.getUuid(), + protocol); response.setPostURL(new URL(url)); // set the post url, this is used in the monitoring thread to determine the SSVM @@ -526,8 +527,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic /* * encoded metadata using the post upload config key */ - TemplateOrVolumePostUploadCommand command = new TemplateOrVolumePostUploadCommand(vol.getId(), vol.getUuid(), volumeStore.getInstallPath(), cmd.getChecksum(), vol.getType().toString(), - vol.getName(), vol.getFormat().toString(), dataObject.getDataStore().getUri(), dataObject.getDataStore().getRole().toString()); + TemplateOrVolumePostUploadCommand command = new TemplateOrVolumePostUploadCommand(vol.getId(), + vol.getUuid(), volumeStore.getInstallPath(), cmd.getChecksum(), vol.getType().toString(), + vol.getName(), vol.getFormat().toString(), dataObject.getDataStore().getUri(), + dataObject.getDataStore().getRole().toString(), zoneId); command.setLocalPath(volumeStore.getLocalDownloadPath()); //using the existing max upload size configuration command.setProcessTimeout(NumbersUtil.parseLong(_configDao.getValue("vmware.package.ova.timeout"), 3600)); diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index da620a3375a..b50acedddc0 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -234,9 +234,10 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat throw new CloudRuntimeException(errMsg); } - TemplateOrVolumePostUploadCommand payload = new TemplateOrVolumePostUploadCommand(template.getId(), template.getUuid(), tmpl.getInstallPath(), tmpl - .getChecksum(), tmpl.getType().toString(), template.getUniqueName(), template.getFormat().toString(), templateOnStore.getDataStore().getUri(), - templateOnStore.getDataStore().getRole().toString()); + TemplateOrVolumePostUploadCommand payload = new TemplateOrVolumePostUploadCommand(template.getId(), + template.getUuid(), tmpl.getInstallPath(), tmpl.getChecksum(), tmpl.getType().toString(), + template.getUniqueName(), template.getFormat().toString(), templateOnStore.getDataStore().getUri(), + templateOnStore.getDataStore().getRole().toString(), zoneId_is); //using the existing max template size configuration payload.setMaxUploadSize(_configDao.getValue(Config.MaxTemplateAndIsoSize.key())); diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index d751bf92e7b..91998877761 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -415,7 +415,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, TemplateOrVolumePostUploadCommand firstCommand = payload.get(0); String ssvmUrlDomain = _configDao.getValue(Config.SecStorageSecureCopyCert.key()); - String protocol = VolumeApiService.UseHttpsToUpload.value() ? "https" : "http"; + String protocol = VolumeApiService.UseHttpsToUpload.valueIn(firstCommand.getZoneId()) ? 
"https" : "http"; String url = ImageStoreUtil.generatePostUploadUrl(ssvmUrlDomain, firstCommand.getRemoteEndPoint(), firstCommand.getEntityUUID(), protocol); response.setPostURL(new URL(url)); diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index e26091f677e..9d4c7311159 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -1252,7 +1252,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar logger.debug(String.format("Boot args for machine profile [%s]: [%s].", profile.toString(), bootArgs)); } - boolean useHttpsToUpload = BooleanUtils.toBooleanDefaultIfNull(VolumeApiService.UseHttpsToUpload.value(), true); + boolean useHttpsToUpload = VolumeApiService.UseHttpsToUpload.valueIn(dc.getId()); logger.debug(String.format("Setting UseHttpsToUpload config on cmdline with [%s] value.", useHttpsToUpload)); buf.append(" useHttpsToUpload=").append(useHttpsToUpload); From ded975ceb8ff2a972d029d289f0c889232707c42 Mon Sep 17 00:00:00 2001 From: Davi Torres <90287660+daviftorres@users.noreply.github.com> Date: Wed, 28 Jan 2026 08:43:05 -0500 Subject: [PATCH 079/126] Improve message "Network is unavailable. Please contact administrator" (#11981) * Improve message "Network is unavailable. Please contact administrator" This trivial change provide more details for root administrators during troubleshooting. See discussion https://github.com/apache/cloudstack/discussions/11980 * Improve error message for unavailable guest network * Update engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java Co-authored-by: dahn * Fix and refactor --------- Co-authored-by: dahn Co-authored-by: nvazquez --- .../main/java/com/cloud/vm/VirtualMachineManagerImpl.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 86f45630611..e8796fb0252 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -935,7 +935,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new CloudRuntimeException(String.format("Unable to start a VM [%s] due to [%s].", vmUuid, e.getMessage()), e).add(VirtualMachine.class, vmUuid); } catch (final ResourceUnavailableException e) { if (e.getScope() != null && e.getScope().equals(VirtualRouter.class)){ - throw new CloudRuntimeException("Network is unavailable. Please contact administrator", e).add(VirtualMachine.class, vmUuid); + Account callingAccount = CallContext.current().getCallingAccount(); + String errorSuffix = (callingAccount != null && callingAccount.getType() == Account.Type.ADMIN) ? + String.format("Failure: %s", e.getMessage()) : + "Please contact administrator."; + throw new CloudRuntimeException(String.format("The Network for VM %s is unavailable. 
%s", vmUuid, errorSuffix), e).add(VirtualMachine.class, vmUuid); } throw new CloudRuntimeException(String.format("Unable to start a VM [%s] due to [%s].", vmUuid, e.getMessage()), e).add(VirtualMachine.class, vmUuid); } From 9fc93af85fb34b480065b975257992b1c5631fcd Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 28 Jan 2026 19:36:04 +0530 Subject: [PATCH 080/126] ui: allow actions for other users of root admin (#11319) Fixes #10306 Signed-off-by: Abhishek Kumar --- ui/src/config/section/user.js | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/ui/src/config/section/user.js b/ui/src/config/section/user.js index a18994fd6ce..65c1a17f760 100644 --- a/ui/src/config/section/user.js +++ b/ui/src/config/section/user.js @@ -105,9 +105,10 @@ export default { message: 'message.enable.user', dataView: true, show: (record, store) => { - return ['Admin', 'DomainAdmin'].includes(store.userInfo.roletype) && !record.isdefault && - !(record.domain === 'ROOT' && record.account === 'admin' && record.accounttype === 1) && - ['disabled', 'locked'].includes(record.state) + if (!['disabled', 'locked'].includes(record.state) || record.isdefault || !['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) { + return false + } + return ![1, 4].includes(record.accounttype) || store.userInfo.roletype === 'Admin' } }, { @@ -117,9 +118,10 @@ export default { message: 'message.disable.user', dataView: true, show: (record, store) => { - return ['Admin', 'DomainAdmin'].includes(store.userInfo.roletype) && !record.isdefault && - !(record.domain === 'ROOT' && record.account === 'admin' && record.accounttype === 1) && - record.state === 'enabled' + if (record.state !== 'enabled' || record.isdefault || !['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) { + return false + } + return ![1, 4].includes(record.accounttype) || (store.userInfo.roletype === 'Admin' && record.id !== store.userInfo.id) } }, { @@ -131,9 +133,10 @@ export default { dataView: true, popup: true, show: (record, store) => { - return ['Admin', 'DomainAdmin'].includes(store.userInfo.roletype) && !record.isdefault && - !(record.domain === 'ROOT' && record.account === 'admin' && record.accounttype === 1) && - record.state === 'enabled' + if (record.state !== 'enabled' || record.isdefault || !['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) { + return false + } + return ![1, 4].includes(record.accounttype) || (store.userInfo.roletype === 'Admin' && record.id !== store.userInfo.id) } }, { From 95de88a8ffee115a98fd34818394d420b01f8cdf Mon Sep 17 00:00:00 2001 From: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> Date: Wed, 28 Jan 2026 19:38:25 +0530 Subject: [PATCH 081/126] Usage server should takeover immediately if the other Usage server has been stopped gracefully (#12507) --- .../src/main/java/com/cloud/usage/dao/UsageJobDao.java | 2 ++ .../src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java | 3 ++- usage/src/main/java/com/cloud/usage/UsageManagerImpl.java | 5 +++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDao.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDao.java index d4038d4ceeb..b22ce69d94e 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDao.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDao.java @@ -28,6 +28,8 @@ public interface UsageJobDao extends GenericDao { UsageJobVO getLastJob(); + UsageJobVO getNextRecurringJob(); + UsageJobVO 
getNextImmediateJob(); long getLastJobSuccessDateMillis(); diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java index 44a7d1a8b72..6f340501cf1 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java @@ -156,7 +156,8 @@ public class UsageJobDaoImpl extends GenericDaoBase implements return jobs.get(0); } - private UsageJobVO getNextRecurringJob() { + @Override + public UsageJobVO getNextRecurringJob() { Filter filter = new Filter(UsageJobVO.class, "id", false, Long.valueOf(0), Long.valueOf(1)); SearchCriteria sc = createSearchCriteria(); sc.addAnd("endMillis", SearchCriteria.Op.EQ, Long.valueOf(0)); diff --git a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java index 99de98f56e4..30cdfcf21f0 100644 --- a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java +++ b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java @@ -2257,6 +2257,11 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna } } + if (_usageJobDao.getNextRecurringJob() == null) { + // Own the usage processing immediately if no other node is owning it + _usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING); + } + Long jobId = _usageJobDao.checkHeartbeat(_hostname, _pid, _aggregationDuration); if (jobId != null) { // if I'm taking over the job...see how long it's been since the last job, and if it's more than the From 059debf212500c789d062abe7be00448a1744824 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Wed, 28 Jan 2026 19:39:37 +0530 Subject: [PATCH 082/126] Add the procedure files for insert extensions and update guest os category (#12482) * Add the procedure files for insert extensions and update guestos category * fixed indentation * Apply suggestions from code review Co-authored-by: Vishesh <8760112+vishesh92@users.noreply.github.com> --------- Co-authored-by: Vishesh <8760112+vishesh92@users.noreply.github.com> --- .../cloud.insert_category_if_not_exists.sql | 27 +++++++++++ ...on_custom_action_details_if_not_exists.sql | 46 +++++++++++++++++++ ..._extension_custom_action_if_not_exists.sql | 46 +++++++++++++++++++ ....insert_extension_detail_if_not_exists.sql | 39 ++++++++++++++++ .../cloud.insert_extension_if_not_exists.sql | 38 +++++++++++++++ .../cloud.update_category_for_guest_oses.sql | 33 +++++++++++++ ...w_and_delete_old_category_for_guest_os.sql | 35 ++++++++++++++ 7 files changed, 264 insertions(+) create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_category_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_details_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_detail_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_category_for_guest_oses.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_new_and_delete_old_category_for_guest_os.sql diff --git 
a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_category_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_category_if_not_exists.sql new file mode 100644 index 00000000000..a82dc7204c2 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_category_if_not_exists.sql @@ -0,0 +1,27 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- Add new OS categories if not present +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_CATEGORY_IF_NOT_EXIST`; +CREATE PROCEDURE `cloud`.`INSERT_CATEGORY_IF_NOT_EXIST`(IN os_name VARCHAR(255)) +BEGIN + IF NOT EXISTS ((SELECT 1 FROM `cloud`.`guest_os_category` WHERE name = os_name)) + THEN + INSERT INTO `cloud`.`guest_os_category` (name, uuid) + VALUES (os_name, UUID()) +; END IF +; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_details_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_details_if_not_exists.sql new file mode 100644 index 00000000000..77b16223626 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_details_if_not_exists.sql @@ -0,0 +1,46 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
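+-- Stores the 'parameters' detail for an extension custom action, looked up by extension and
+-- action name, only when no such detail row exists yet.
+-- Illustrative call for the procedure defined below (argument values are examples only):
+-- CALL `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_DETAILS_IF_NOT_EXISTS`('MaaS', 'SampleAction', '[{"name":"param1","type":"STRING"}]');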
+ +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_DETAILS_IF_NOT_EXISTS`; +CREATE PROCEDURE `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_DETAILS_IF_NOT_EXISTS` ( + IN ext_name VARCHAR(255), + IN action_name VARCHAR(255), + IN param_json TEXT +) +BEGIN + DECLARE action_id BIGINT UNSIGNED +; SELECT `eca`.`id` INTO action_id FROM `cloud`.`extension_custom_action` `eca` + JOIN `cloud`.`extension` `e` ON `e`.`id` = `eca`.`extension_id` + WHERE `eca`.`name` = action_name AND `e`.`name` = ext_name LIMIT 1 +; IF NOT EXISTS ( + SELECT 1 FROM `cloud`.`extension_custom_action_details` + WHERE `extension_custom_action_id` = action_id + AND `name` = 'parameters' + ) THEN + INSERT INTO `cloud`.`extension_custom_action_details` ( + `extension_custom_action_id`, + `name`, + `value`, + `display` + ) VALUES ( + action_id, + 'parameters', + param_json, + 0 + ) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_if_not_exists.sql new file mode 100644 index 00000000000..9dbffa630f8 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_if_not_exists.sql @@ -0,0 +1,46 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
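+-- Registers a custom action for the named extension only if an action with that name is not
+-- already present for it.
+-- Illustrative call for the procedure defined below (argument values are examples only):
+-- CALL `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_IF_NOT_EXISTS`('MaaS', 'SampleAction', 'Sample description', 'VirtualMachine', 15, 'Action succeeded', 'Action failed', 600);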
+ +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_IF_NOT_EXISTS`; +CREATE PROCEDURE `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_IF_NOT_EXISTS`( + IN ext_name VARCHAR(255), + IN action_name VARCHAR(255), + IN action_desc VARCHAR(4096), + IN resource_type VARCHAR(255), + IN allowed_roles INT UNSIGNED, + IN success_msg VARCHAR(4096), + IN error_msg VARCHAR(4096), + IN timeout_seconds INT UNSIGNED +) +BEGIN + DECLARE ext_id BIGINT +; SELECT `id` INTO ext_id FROM `cloud`.`extension` WHERE `name` = ext_name LIMIT 1 +; IF NOT EXISTS ( + SELECT 1 FROM `cloud`.`extension_custom_action` WHERE `name` = action_name AND `extension_id` = ext_id + ) THEN + INSERT INTO `cloud`.`extension_custom_action` ( + `uuid`, `name`, `description`, `extension_id`, `resource_type`, + `allowed_role_types`, `success_message`, `error_message`, + `enabled`, `timeout`, `created`, `removed` + ) + VALUES ( + UUID(), action_name, action_desc, ext_id, resource_type, + allowed_roles, success_msg, error_msg, + 1, timeout_seconds, NOW(), NULL + ) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_detail_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_detail_if_not_exists.sql new file mode 100644 index 00000000000..f9d6c5da951 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_detail_if_not_exists.sql @@ -0,0 +1,39 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_EXTENSION_DETAIL_IF_NOT_EXISTS`; +CREATE PROCEDURE `cloud`.`INSERT_EXTENSION_DETAIL_IF_NOT_EXISTS`( + IN ext_name VARCHAR(255), + IN detail_key VARCHAR(255), + IN detail_value TEXT, + IN display TINYINT(1) +) +BEGIN + DECLARE ext_id BIGINT +; SELECT `id` INTO ext_id FROM `cloud`.`extension` WHERE `name` = ext_name LIMIT 1 +; IF NOT EXISTS ( + SELECT 1 FROM `cloud`.`extension_details` + WHERE `extension_id` = ext_id AND `name` = detail_key + ) THEN + INSERT INTO `cloud`.`extension_details` ( + `extension_id`, `name`, `value`, `display` + ) + VALUES ( + ext_id, detail_key, detail_value, display + ) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_if_not_exists.sql new file mode 100644 index 00000000000..8d74f9b2a98 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_if_not_exists.sql @@ -0,0 +1,38 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. 
See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_EXTENSION_IF_NOT_EXISTS`; +CREATE PROCEDURE `cloud`.`INSERT_EXTENSION_IF_NOT_EXISTS`( + IN ext_name VARCHAR(255), + IN ext_desc VARCHAR(255), + IN ext_path VARCHAR(255) +) +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM `cloud`.`extension` WHERE `name` = ext_name + ) THEN + INSERT INTO `cloud`.`extension` ( + `uuid`, `name`, `description`, `type`, + `relative_path`, `path_ready`, + `is_user_defined`, `state`, `created`, `removed` + ) + VALUES ( + UUID(), ext_name, ext_desc, 'Orchestrator', + ext_path, 1, 0, 'Enabled', NOW(), NULL + ) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_category_for_guest_oses.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_category_for_guest_oses.sql new file mode 100644 index 00000000000..87f3a85d27e --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_category_for_guest_oses.sql @@ -0,0 +1,33 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
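+-- Illustrative call for the procedure defined below; it reassigns every guest OS whose display
+-- name matches the given pattern to the named category (category/OS names here are examples only):
+-- CALL `cloud`.`UPDATE_CATEGORY_FOR_GUEST_OSES`('Ubuntu', 'Ubuntu');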
+ +-- Move existing guest OS to new categories +DROP PROCEDURE IF EXISTS `cloud`.`UPDATE_CATEGORY_FOR_GUEST_OSES`; +CREATE PROCEDURE `cloud`.`UPDATE_CATEGORY_FOR_GUEST_OSES`(IN category_name VARCHAR(255), IN os_name VARCHAR(255)) +BEGIN + DECLARE category_id BIGINT +; SELECT `id` INTO category_id + FROM `cloud`.`guest_os_category` + WHERE `name` = category_name + LIMIT 1 +; IF category_id IS NULL THEN + SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'Category not found' +; END IF +; UPDATE `cloud`.`guest_os` + SET `category_id` = category_id + WHERE `display_name` LIKE CONCAT('%', os_name, '%') +; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_new_and_delete_old_category_for_guest_os.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_new_and_delete_old_category_for_guest_os.sql new file mode 100644 index 00000000000..42f7aa738cf --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_new_and_delete_old_category_for_guest_os.sql @@ -0,0 +1,35 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
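+-- Illustrative call for the procedure defined below; it moves all guest OS entries from the old
+-- category to the new one and then marks the old category as removed (names are examples only):
+-- CALL `cloud`.`UPDATE_NEW_AND_DELETE_OLD_CATEGORY_FOR_GUEST_OS`('Other', 'Unix');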
+ +-- Move existing guest OS whose category will be deleted to Other category +DROP PROCEDURE IF EXISTS `cloud`.`UPDATE_NEW_AND_DELETE_OLD_CATEGORY_FOR_GUEST_OS`; +CREATE PROCEDURE `cloud`.`UPDATE_NEW_AND_DELETE_OLD_CATEGORY_FOR_GUEST_OS`(IN to_category_name VARCHAR(255), IN from_category_name VARCHAR(255)) +BEGIN + DECLARE done INT DEFAULT 0 +; DECLARE to_category_id BIGINT +; SELECT id INTO to_category_id + FROM `cloud`.`guest_os_category` + WHERE `name` = to_category_name + LIMIT 1 +; IF to_category_id IS NULL THEN + SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'ToCategory not found' +; END IF +; UPDATE `cloud`.`guest_os` + SET `category_id` = to_category_id + WHERE `category_id` = (SELECT `id` FROM `cloud`.`guest_os_category` WHERE `name` = from_category_name) +; UPDATE `cloud`.`guest_os_category` SET `removed`=now() WHERE `name` = from_category_name +; END; From 9956d325488246a1b6dd0d33398471c0c698de49 Mon Sep 17 00:00:00 2001 From: Daman Arora <61474540+Damans227@users.noreply.github.com> Date: Wed, 28 Jan 2026 09:11:14 -0500 Subject: [PATCH 083/126] Fix delete snapshot policy expunged volume (#12474) * use findByIdIncludingRemoved for volume retrieval in snapshot policy validation * add unit tests * add cleanup for orphan snapshot policies * delete snapshot policies when expunging volumes * update orphan cleanup to remove policies for volumes that are in expunged state or null --------- Co-authored-by: Daman Arora --- .../storage/volume/VolumeServiceImpl.java | 4 +- .../storage/snapshot/SnapshotManagerImpl.java | 18 +++- .../snapshot/SnapshotManagerImplTest.java | 92 +++++++++++++++++++ 3 files changed, 112 insertions(+), 2 deletions(-) diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index c59ae5b18e3..5e0eb7529ac 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -387,6 +387,7 @@ public class VolumeServiceImpl implements VolumeService { logger.info("Expunge volume with no data store specified"); if (canVolumeBeRemoved(volume.getId())) { logger.info("Volume {} is not referred anywhere, remove it from volumes table", volume); + snapshotMgr.deletePoliciesForVolume(volume.getId()); volDao.remove(volume.getId()); } future.complete(result); @@ -422,6 +423,7 @@ public class VolumeServiceImpl implements VolumeService { } VMTemplateVO template = templateDao.findById(vol.getTemplateId()); if (template != null && !template.isDeployAsIs()) { + snapshotMgr.deletePoliciesForVolume(vol.getId()); volDao.remove(vol.getId()); future.complete(result); return future; @@ -493,6 +495,7 @@ public class VolumeServiceImpl implements VolumeService { if (canVolumeBeRemoved(vo.getId())) { logger.info("Volume {} is not referred anywhere, remove it from volumes table", vo); + snapshotMgr.deletePoliciesForVolume(vo.getId()); volDao.remove(vo.getId()); } @@ -1657,7 +1660,6 @@ public class VolumeServiceImpl implements VolumeService { // mark volume entry in volumes table as destroy state VolumeInfo vol = volFactory.getVolume(volumeId); vol.stateTransit(Volume.Event.DestroyRequested); - snapshotMgr.deletePoliciesForVolume(volumeId); annotationDao.removeByEntityType(AnnotationService.EntityType.VOLUME.name(), vol.getUuid()); vol.stateTransit(Volume.Event.OperationSucceeded); diff --git 
a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index ff9989acac3..886feea19f2 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -1889,9 +1889,25 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement logger.debug("Failed to delete snapshot in destroying state: {}", snapshotVO); } } + cleanupOrphanSnapshotPolicies(); + return true; } + private void cleanupOrphanSnapshotPolicies() { + List policies = _snapshotPolicyDao.listActivePolicies(); + if (CollectionUtils.isEmpty(policies)) { + return; + } + for (SnapshotPolicyVO policy : policies) { + VolumeVO volume = _volsDao.findByIdIncludingRemoved(policy.getVolumeId()); + if (volume == null || volume.getState() == Volume.State.Expunged) { + logger.info("Removing orphan snapshot policy {} for non-existent volume {}", policy.getId(), policy.getVolumeId()); + deletePolicy(policy.getId()); + } + } + } + @Override public boolean stop() { backupSnapshotExecutor.shutdown(); @@ -1924,7 +1940,7 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement if (snapshotPolicyVO == null) { throw new InvalidParameterValueException("Policy id given: " + policy + " does not exist"); } - VolumeVO volume = _volsDao.findById(snapshotPolicyVO.getVolumeId()); + VolumeVO volume = _volsDao.findByIdIncludingRemoved(snapshotPolicyVO.getVolumeId()); if (volume == null) { throw new InvalidParameterValueException("Policy id given: " + policy + " does not belong to a valid volume"); } diff --git a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java index 367a49a801f..2d3cb04ab96 100644 --- a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java +++ b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java @@ -30,6 +30,7 @@ import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotVO; import com.cloud.storage.VolumeVO; +import com.cloud.server.TaggedResourceService; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotPolicyDao; import com.cloud.storage.dao.SnapshotZoneDao; @@ -44,6 +45,7 @@ import com.cloud.utils.Pair; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -100,6 +102,10 @@ public class SnapshotManagerImplTest { VolumeDao volumeDao; @Mock SnapshotPolicyDao snapshotPolicyDao; + @Mock + SnapshotScheduler snapshotScheduler; + @Mock + TaggedResourceService taggedResourceService; @InjectMocks SnapshotManagerImpl snapshotManager = new SnapshotManagerImpl(); @@ -108,6 +114,8 @@ public class SnapshotManagerImplTest { snapshotManager._snapshotPolicyDao = snapshotPolicyDao; snapshotManager._volsDao = volumeDao; snapshotManager._accountMgr = accountManager; + snapshotManager._snapSchedMgr = snapshotScheduler; + snapshotManager.taggedResourceService = taggedResourceService; } @After @@ -520,4 +528,88 @@ public class SnapshotManagerImplTest { Assert.assertEquals(1, 
result.first().size()); Assert.assertEquals(Integer.valueOf(1), result.second()); } + + @Test + public void testDeleteSnapshotPoliciesForRemovedVolume() { + Long policyId = 1L; + Long volumeId = 10L; + Long accountId = 2L; + + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(policyId); + Mockito.when(cmd.getIds()).thenReturn(null); + + Account caller = Mockito.mock(Account.class); + Mockito.when(caller.getId()).thenReturn(accountId); + CallContext.register(Mockito.mock(User.class), caller); + + SnapshotPolicyVO policyVO = Mockito.mock(SnapshotPolicyVO.class); + Mockito.when(policyVO.getId()).thenReturn(policyId); + Mockito.when(policyVO.getVolumeId()).thenReturn(volumeId); + Mockito.when(policyVO.getUuid()).thenReturn("policy-uuid"); + Mockito.when(snapshotPolicyDao.findById(policyId)).thenReturn(policyVO); + + // Volume is removed (expunged) but findByIdIncludingRemoved should still return it + VolumeVO volumeVO = Mockito.mock(VolumeVO.class); + Mockito.when(volumeDao.findByIdIncludingRemoved(volumeId)).thenReturn(volumeVO); + + Mockito.when(snapshotPolicyDao.remove(policyId)).thenReturn(true); + + boolean result = snapshotManager.deleteSnapshotPolicies(cmd); + + Assert.assertTrue(result); + Mockito.verify(volumeDao).findByIdIncludingRemoved(volumeId); + Mockito.verify(snapshotScheduler).removeSchedule(volumeId, policyId); + Mockito.verify(snapshotPolicyDao).remove(policyId); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSnapshotPoliciesNoPolicyId() { + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(null); + Mockito.when(cmd.getIds()).thenReturn(null); + + snapshotManager.deleteSnapshotPolicies(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSnapshotPoliciesPolicyNotFound() { + Long policyId = 1L; + + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(policyId); + Mockito.when(cmd.getIds()).thenReturn(null); + + Mockito.when(snapshotPolicyDao.findById(policyId)).thenReturn(null); + + snapshotManager.deleteSnapshotPolicies(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSnapshotPoliciesVolumeNotFound() { + Long policyId = 1L; + Long volumeId = 10L; + + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(policyId); + Mockito.when(cmd.getIds()).thenReturn(null); + + SnapshotPolicyVO policyVO = Mockito.mock(SnapshotPolicyVO.class); + Mockito.when(policyVO.getVolumeId()).thenReturn(volumeId); + Mockito.when(snapshotPolicyDao.findById(policyId)).thenReturn(policyVO); + + // Volume doesn't exist at all (even when including removed) + Mockito.when(volumeDao.findByIdIncludingRemoved(volumeId)).thenReturn(null); + + snapshotManager.deleteSnapshotPolicies(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSnapshotPoliciesManualPolicyId() { + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(Snapshot.MANUAL_POLICY_ID); + Mockito.when(cmd.getIds()).thenReturn(null); + + snapshotManager.deleteSnapshotPolicies(cmd); + } } From 7786cf93c28fec5e5baf5d069f4f4d08e1750ab8 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 28 Jan 2026 09:13:56 -0500 Subject: [PATCH 084/126] Veeam: Use restore timeout 
as an interval as opposed to a counter (#11772) * Veeam: Use restore timeout as a time interval as opposed to a counter * fix log * fix unit test * remove unused imports * fix comment * unused import * change to while - issure refactoring --- .../org/apache/cloudstack/backup/veeam/VeeamClient.java | 7 +++++-- .../apache/cloudstack/backup/veeam/VeeamClientTest.java | 5 ++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java index fe3633dab16..8a111f92868 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java @@ -364,7 +364,9 @@ public class VeeamClient { * that is used to wait for the restore to complete before throwing a {@link CloudRuntimeException}. */ protected void checkIfRestoreSessionFinished(String type, String path) throws IOException { - for (int j = 0; j < restoreTimeout; j++) { + long startTime = System.currentTimeMillis(); + long timeoutMs = restoreTimeout * 1000L; + while (System.currentTimeMillis() - startTime < timeoutMs) { HttpResponse relatedResponse = get(path); RestoreSession session = parseRestoreSessionResponse(relatedResponse); if (session.getResult().equals("Success")) { @@ -378,7 +380,8 @@ public class VeeamClient { getRestoreVmErrorDescription(StringUtils.substringAfterLast(sessionUid, ":")))); throw new CloudRuntimeException(String.format("Restore job [%s] failed.", sessionUid)); } - logger.debug(String.format("Waiting %s seconds, out of a total of %s seconds, for the restore backup process to finish.", j, restoreTimeout)); + logger.debug("Waiting {} seconds, out of a total of {} seconds, for the restore backup process to finish.", + (System.currentTimeMillis() - startTime) / 1000, restoreTimeout); try { Thread.sleep(1000); diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java index 0c70c75939e..333c3e16053 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java @@ -25,7 +25,6 @@ import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; import static com.github.tomakehurst.wiremock.client.WireMock.verify; import static org.junit.Assert.fail; -import static org.mockito.Mockito.times; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -157,7 +156,7 @@ public class VeeamClientTest { @Test public void checkIfRestoreSessionFinishedTestTimeoutException() throws IOException { try { - ReflectionTestUtils.setField(mockClient, "restoreTimeout", 10); + ReflectionTestUtils.setField(mockClient, "restoreTimeout", 2); RestoreSession restoreSession = Mockito.mock(RestoreSession.class); HttpResponse httpResponse = Mockito.mock(HttpResponse.class); Mockito.when(mockClient.get(Mockito.anyString())).thenReturn(httpResponse); @@ -169,7 +168,7 @@ public class VeeamClientTest { } catch (Exception e) { Assert.assertEquals("Related job type: RestoreTest was not successful", e.getMessage()); } - Mockito.verify(mockClient, times(10)).get(Mockito.anyString()); + 
Mockito.verify(mockClient, Mockito.atLeastOnce()).get(Mockito.anyString()); } @Test From d83c2e42253c870955a05a386db5ec266e04a3f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jan 2026 15:30:46 +0100 Subject: [PATCH 085/126] Bump com.vmware.vapi:vapi-runtime from 2.40.0 to 2.61.2 (#12495) --- plugins/network-elements/nsx/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/network-elements/nsx/pom.xml b/plugins/network-elements/nsx/pom.xml index e6431d074ca..9c0f680068c 100644 --- a/plugins/network-elements/nsx/pom.xml +++ b/plugins/network-elements/nsx/pom.xml @@ -59,7 +59,7 @@ com.vmware.vapi vapi-runtime - 2.40.0 + 2.61.2 From 1300fc5e91ac0c6ab57dbd35bb083944e9f94cef Mon Sep 17 00:00:00 2001 From: Jeevan Date: Wed, 28 Jan 2026 20:56:37 +0530 Subject: [PATCH 086/126] Fix keyword parameter filtering in listBackupOfferings API (#12540) Signed-off-by: Jeevan Yewale Co-authored-by: Jeevan Yewale --- .../java/org/apache/cloudstack/backup/BackupManagerImpl.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index 1e6ef1a7852..dc2677a507f 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -239,7 +239,8 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { final Filter searchFilter = new Filter(BackupOfferingVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal()); SearchBuilder sb = backupOfferingDao.createSearchBuilder(); sb.and("zone_id", sb.entity().getZoneId(), SearchCriteria.Op.EQ); - sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); + CallContext ctx = CallContext.current(); final Account caller = ctx.getCallingAccount(); if (Account.Type.NORMAL == caller.getType()) { From 106f478e2c988a6cddbf302e1e83e5c344b53464 Mon Sep 17 00:00:00 2001 From: Harikrishna Date: Wed, 28 Jan 2026 22:46:38 +0530 Subject: [PATCH 087/126] Remove unnecessary stubbings in ManagementServerMaintenanceManagerImplTest (#11914) --- .../maintenance/ManagementServerMaintenanceManagerImplTest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/maintenance/src/test/java/org/apache/cloudstack/maintenance/ManagementServerMaintenanceManagerImplTest.java b/plugins/maintenance/src/test/java/org/apache/cloudstack/maintenance/ManagementServerMaintenanceManagerImplTest.java index 280d1eaf9eb..a208893f6d1 100644 --- a/plugins/maintenance/src/test/java/org/apache/cloudstack/maintenance/ManagementServerMaintenanceManagerImplTest.java +++ b/plugins/maintenance/src/test/java/org/apache/cloudstack/maintenance/ManagementServerMaintenanceManagerImplTest.java @@ -321,7 +321,6 @@ public class ManagementServerMaintenanceManagerImplTest { spy.prepareForMaintenance("static", false); }); - Mockito.when(msHost.getState()).thenReturn(ManagementServerHost.State.Maintenance); Mockito.doNothing().when(jobManagerMock).enableAsyncJobs(); spy.cancelMaintenance(); Mockito.verify(jobManagerMock).enableAsyncJobs(); @@ -339,7 +338,6 @@ public class ManagementServerMaintenanceManagerImplTest { spy.prepareForMaintenance("static", false); }); - Mockito.when(msHost.getState()).thenReturn(ManagementServerHost.State.PreparingForMaintenance); 
Mockito.doNothing().when(jobManagerMock).enableAsyncJobs(); spy.cancelMaintenance(); Mockito.verify(jobManagerMock).enableAsyncJobs(); From 98debd235f99c2e6e0f49b69f7b16b5e3b9b0853 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 29 Jan 2026 10:35:32 +0530 Subject: [PATCH 088/126] gha: fix duplicate key for stale workflow Fixes failing pre-commit GHA Signed-off-by: Abhishek Kumar --- .github/workflows/stale.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index c957392c504..842e4497a4a 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -41,7 +41,6 @@ jobs: days-before-pr-close: 240 exempt-issue-labels: 'gsoc,good-first-issue,long-term-plan' exempt-pr-labels: 'status:ready-for-merge,status:needs-testing,status:on-hold' - days-before-close: -1 - uses: actions/stale@v10 with: stale-issue-label: 'archive' From 243872a77103494b3c030439e4ee24071a03ef2a Mon Sep 17 00:00:00 2001 From: Vishesh <8760112+vishesh92@users.noreply.github.com> Date: Thu, 29 Jan 2026 11:40:23 +0530 Subject: [PATCH 089/126] Use infinite scroll select (#11991) * addresses the domain selection (listed after the page size) with keyword search --- ui/src/components/view/DedicateDomain.vue | 133 +++++------ .../widgets/InfiniteScrollSelect.vue | 91 ++++++- ui/src/views/iam/AddUser.vue | 121 ++++------ ui/src/views/infra/UsageRecords.vue | 112 ++++----- ui/src/views/storage/CreateTemplate.vue | 111 ++++----- ui/src/views/storage/UploadLocalVolume.vue | 225 +++++++----------- ui/src/views/storage/UploadVolume.vue | 216 +++++++---------- ui/src/views/tools/CreateWebhook.vue | 124 ++++------ ui/src/views/tools/ManageVolumes.vue | 157 +++++------- 9 files changed, 560 insertions(+), 730 deletions(-) diff --git a/ui/src/components/view/DedicateDomain.vue b/ui/src/components/view/DedicateDomain.vue index 0b3645ce418..4b8cc31ae46 100644 --- a/ui/src/components/view/DedicateDomain.vue +++ b/ui/src/components/view/DedicateDomain.vue @@ -18,52 +18,44 @@ diff --git a/ui/src/config/router.js b/ui/src/config/router.js index 08df799dd89..3e5d8677b34 100644 --- a/ui/src/config/router.js +++ b/ui/src/config/router.js @@ -81,6 +81,7 @@ function generateRouterMap (section) { filters: child.filters, params: child.params ? child.params : {}, columns: child.columns, + advisories: !vueProps.$config.advisoriesDisabled ? 
child.advisories : undefined, details: child.details, searchFilters: child.searchFilters, related: child.related, @@ -180,6 +181,10 @@ function generateRouterMap (section) { map.meta.columns = section.columns } + if (!vueProps.$config.advisoriesDisabled && section.advisories) { + map.meta.advisories = section.advisories + } + if (section.actions) { map.meta.actions = section.actions } diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index a03693e351d..32e888bb53d 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -18,6 +18,8 @@ import { shallowRef, defineAsyncComponent } from 'vue' import store from '@/store' import { isZoneCreated } from '@/utils/zone' +import { getAPI, postAPI, getBaseUrl } from '@/api' +import { getLatestKubernetesIsoParams } from '@/utils/acsrepo' import kubernetesIcon from '@/assets/icons/kubernetes.svg?inline' export default { @@ -582,6 +584,182 @@ export default { } ], resourceType: 'KubernetesCluster', + advisories: [ + { + id: 'cks-min-offering', + severity: 'warning', + message: 'message.advisory.cks.min.offering', + docsHelp: 'plugins/cloudstack-kubernetes-service.html', + dismissOnConditionFail: true, + condition: async (store) => { + if (!('listServiceOfferings' in store.getters.apis)) { + return false + } + const params = { + cpunumber: 2, + memory: 2048, + issystem: false + } + try { + const json = await getAPI('listServiceOfferings', params) + const offerings = json?.listserviceofferingsresponse?.serviceoffering || [] + return !offerings.some(o => !o.iscustomized) + } catch (error) {} + return false + }, + actions: [ + { + primary: true, + label: 'label.add.minimum.required.compute.offering', + loadingLabel: 'message.adding.minimum.required.compute.offering.kubernetes.cluster', + show: (store) => { return ('createServiceOffering' in store.getters.apis) }, + run: async () => { + const params = { + name: 'CKS Instance', + cpunumber: 2, + cpuspeed: 1000, + memory: 2048, + iscustomized: false, + issystem: false + } + try { + const json = await postAPI('createServiceOffering', params) + if (json?.createserviceofferingresponse?.serviceoffering) { + return true + } + } catch (error) {} + return false + }, + successMessage: 'message.added.minimum.required.compute.offering.kubernetes.cluster', + errorMessage: 'message.add.minimum.required.compute.offering.kubernetes.cluster.failed' + }, + { + label: 'label.go.to.compute.offerings', + show: (store) => { return ('listServiceOfferings' in store.getters.apis) }, + run: (store, router) => { + router.push({ name: 'computeoffering' }) + return false + } + } + ] + }, + { + id: 'cks-version-check', + severity: 'warning', + message: 'message.advisory.cks.version.check', + docsHelp: 'plugins/cloudstack-kubernetes-service.html', + dismissOnConditionFail: true, + condition: async (store) => { + const api = 'listKubernetesSupportedVersions' + if (!(api in store.getters.apis)) { + return false + } + try { + const json = await getAPI(api, {}) + const versions = json?.listkubernetessupportedversionsresponse?.kubernetessupportedversion || [] + return versions.length === 0 + } catch (error) {} + return false + }, + actions: [ + { + primary: true, + label: 'label.add.latest.kubernetes.iso', + loadingLabel: 'message.adding.latest.kubernetes.iso', + show: (store) => { return ('addKubernetesSupportedVersion' in store.getters.apis) }, + run: async () => { + let arch = 'x86_64' + if ('listClusters' in store.getters.apis) { + try { + const json = await 
getAPI('listClusters', { allocationstate: 'Enabled', page: 1, pagesize: 1 }) + const cluster = json?.listclustersresponse?.cluster?.[0] || {} + arch = cluster.architecture || 'x86_64' + } catch (error) {} + } + const params = await getLatestKubernetesIsoParams(arch) + try { + const json = await postAPI('addKubernetesSupportedVersion', params) + if (json?.addkubernetessupportedversionresponse?.kubernetessupportedversion) { + return true + } + } catch (error) {} + return false + }, + successMessage: 'message.added.latest.kubernetes.iso', + errorMessage: 'message.add.latest.kubernetes.iso.failed' + }, + { + label: 'label.go.to.kubernetes.isos', + show: true, + run: (store, router) => { + router.push({ name: 'kubernetesiso' }) + return false + } + } + ] + }, + { + id: 'cks-endpoint-url', + severity: 'warning', + message: 'message.advisory.cks.endpoint.url.not.configured', + docsHelp: 'plugins/cloudstack-kubernetes-service.html', + dismissOnConditionFail: true, + condition: async (store) => { + if (!['Admin'].includes(store.getters.userInfo.roletype)) { + return false + } + let url = '' + const baseUrl = getBaseUrl() + if (baseUrl.startsWith('/')) { + url = window.location.origin + baseUrl + } + if (!url || url.startsWith('http://localhost')) { + return false + } + const params = { + name: 'endpoint.url' + } + const json = await getAPI('listConfigurations', params) + const configuration = json?.listconfigurationsresponse?.configuration?.[0] || {} + return !configuration.value || configuration.value.startsWith('http://localhost') + }, + actions: [ + { + primary: true, + label: 'label.fix.global.setting', + show: (store) => { return ('updateConfiguration' in store.getters.apis) }, + run: async () => { + let url = '' + const baseUrl = getBaseUrl() + if (baseUrl.startsWith('/')) { + url = window.location.origin + baseUrl + } + const params = { + name: 'endpoint.url', + value: url + } + try { + const json = await postAPI('updateConfiguration', params) + if (json?.updateconfigurationresponse?.configuration) { + return true + } + } catch (error) {} + return false + }, + successMessage: 'message.global.setting.updated', + errorMessage: 'message.global.setting.update.failed' + }, + { + label: 'label.go.to.global.settings', + show: (store) => { return ('listConfigurations' in store.getters.apis) }, + run: (store, router) => { + router.push({ name: 'globalsetting' }) + return false + } + } + ] + } + ], actions: [ { api: 'createKubernetesCluster', diff --git a/ui/src/utils/acsrepo/index.js b/ui/src/utils/acsrepo/index.js new file mode 100644 index 00000000000..809bd7f1748 --- /dev/null +++ b/ui/src/utils/acsrepo/index.js @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
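+// Helper for resolving the newest CloudStack Kubernetes Service (CKS) setup ISO published under
+// download.cloudstack.org/cks/, falling back to a pinned default when the listing cannot be read.
+// Illustrative use (sketch only; the architecture value is an example):
+//   const params = await getLatestKubernetesIsoParams('x86_64')
+//   // params -> { name, semanticversion, url, arch, mincpunumber, minmemory }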
+ +const BASE_KUBERNETES_ISO_URL = 'https://download.cloudstack.org/cks/' + +function getDefaultLatestKubernetesIsoParams (arch) { + return { + name: 'v1.33.1-calico-' + arch, + semanticversion: '1.33.1', + url: BASE_KUBERNETES_ISO_URL + 'setup-v1.33.1-calico-' + arch + '.iso', + arch: arch, + mincpunumber: 2, + minmemory: 2048 + } +} + +/** + * Returns the latest Kubernetes ISO info for the given architecture. + * Falls back to a hardcoded default if fetching fails. + * @param {string} arch + * @returns {Promise<{name: string, semanticversion: string, url: string, arch: string}>} + */ +export async function getLatestKubernetesIsoParams (arch) { + arch = arch || 'x86_64' + try { + const html = await fetch(BASE_KUBERNETES_ISO_URL, { cache: 'no-store' }).then(r => r.text()) + + const hrefs = [...html.matchAll(/href="([^"]+\.iso)"/gi)].map(m => m[1]) + + // Prefer files that explicitly include the arch (e.g. ...-x86_64.iso) + let isoHrefs = hrefs.filter(h => new RegExp(`${arch}\\.iso$`, 'i').test(h)) + + // Fallback: older files without arch suffix (e.g. setup-1.28.4.iso) + if (isoHrefs.length === 0) { + isoHrefs = hrefs.filter(h => /setup-\d+\.\d+\.\d+\.iso$/i.test(h)) + } + + const entries = isoHrefs.map(h => { + const m = h.match(/setup-(?:v)?(\d+\.\d+\.\d+)(?:-calico)?(?:-(x86_64|arm64))?/i) + return m + ? { + name: h.replace('.iso', ''), + semanticversion: m[1], + url: new URL(h, BASE_KUBERNETES_ISO_URL).toString(), + arch: m[2] || arch, + mincpunumber: 2, + minmemory: 2048 + } + : null + }).filter(Boolean) + + if (entries.length === 0) throw new Error('No matching ISOs found') + + entries.sort((a, b) => { + const pa = a.semanticversion.split('.').map(Number) + const pb = b.semanticversion.split('.').map(Number) + for (let i = 0; i < 3; i++) { + if ((pb[i] ?? 0) !== (pa[i] ?? 0)) return (pb[i] ?? 0) - (pa[i] ?? 
0) + } + return 0 + }) + + return entries[0] + } catch { + return { ...getDefaultLatestKubernetesIsoParams(arch) } + } +} diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index e0583cd97a4..cfbaf580507 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -540,6 +540,9 @@ class="row-element" v-else > + Date: Thu, 29 Jan 2026 13:30:54 +0530 Subject: [PATCH 094/126] ui: allow viewing hosts from management server connected agents (#12149) Signed-off-by: Abhishek Kumar --- ui/src/components/view/ListView.vue | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index ebdfe3df1b4..7fd344da209 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -868,6 +868,14 @@ + @@ -313,13 +334,15 @@ import { getAPI, postAPI } from '@/api' import draggable from 'vuedraggable' import { mixinForm } from '@/utils/mixin' import TooltipButton from '@/components/widgets/TooltipButton' +import ImportNetworkACL from './ImportNetworkACL' export default { name: 'AclListRulesTab', mixins: [mixinForm], components: { draggable, - TooltipButton + TooltipButton, + ImportNetworkACL }, props: { resource: { @@ -344,6 +367,7 @@ export default { tagsModalVisible: false, tagsLoading: false, ruleModalVisible: false, + showImportModal: false, ruleModalTitle: this.$t('label.edit.rule'), ruleFormMode: 'edit' } @@ -788,6 +812,12 @@ export default { }, capitalise (val) { return val.toUpperCase() + }, + handleImportRules () { + this.showImportModal = true + }, + closeImportModal () { + this.showImportModal = false } } } diff --git a/ui/src/views/network/ImportNetworkACL.vue b/ui/src/views/network/ImportNetworkACL.vue new file mode 100644 index 00000000000..2456a75af45 --- /dev/null +++ b/ui/src/views/network/ImportNetworkACL.vue @@ -0,0 +1,381 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ + + + + + From 9b4f16b73fabd2c53cb9884326f8442936d66bad Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 29 Jan 2026 13:52:07 +0530 Subject: [PATCH 096/126] api,server: apis return their http request type (#11382) * api,server: apis return their http request type Signed-off-by: Abhishek Kumar * fix and unit test Signed-off-by: Abhishek Kumar * more test Signed-off-by: Abhishek Kumar * address copilot Signed-off-by: Abhishek Kumar * Update plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java --------- Signed-off-by: Abhishek Kumar Co-authored-by: dahn Co-authored-by: Harikrishna --- .../org/apache/cloudstack/api/APICommand.java | 2 + .../apache/cloudstack/api/ApiConstants.java | 1 + ...ntAllowedToCreateOfferingsWithTagsCmd.java | 3 +- .../api/response/ApiDiscoveryResponse.java | 13 ++ .../discovery/ApiDiscoveryServiceImpl.java | 9 +- .../ApiDiscoveryServiceImplTest.java | 123 ++++++++++++++ .../api/command/QuotaBalanceCmd.java | 3 +- .../api/command/QuotaEnabledCmd.java | 3 +- .../api/command/QuotaStatementCmd.java | 3 +- .../api/command/QuotaSummaryCmd.java | 3 +- .../api/command/QuotaTariffListCmd.java | 3 +- .../cloudian/api/CloudianIsEnabledCmd.java | 3 +- .../api/command/ReadyForShutdownCmd.java | 3 +- .../command/VerifyOAuthCodeAndGetUserCmd.java | 14 +- .../main/java/com/cloud/api/ApiServlet.java | 46 ++++-- .../java/com/cloud/api/ApiServletTest.java | 151 ++++++++++++++---- 16 files changed, 323 insertions(+), 60 deletions(-) create mode 100644 plugins/api/discovery/src/test/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImplTest.java diff --git a/api/src/main/java/org/apache/cloudstack/api/APICommand.java b/api/src/main/java/org/apache/cloudstack/api/APICommand.java index c559be08116..b77649046ca 100644 --- a/api/src/main/java/org/apache/cloudstack/api/APICommand.java +++ b/api/src/main/java/org/apache/cloudstack/api/APICommand.java @@ -50,4 +50,6 @@ public @interface APICommand { RoleType[] authorized() default {}; Class[] entityType() default {}; + + String httpMethod() default ""; } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 2e0e843b535..d00e339de2f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -282,6 +282,7 @@ public class ApiConstants { public static final String HOST = "host"; public static final String HOST_CONTROL_STATE = "hostcontrolstate"; public static final String HOSTS_MAP = "hostsmap"; + public static final String HTTP_REQUEST_TYPE = "httprequesttype"; public static final String HYPERVISOR = "hypervisor"; public static final String INLINE = "inline"; public static final String INSTANCE = "instance"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java index fcd6b03d3e5..4b1cd2ff725 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java @@ -26,7 +26,8 @@ import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.IsAccountAllowedToCreateOfferingsWithTagsResponse; @APICommand(name = 
"isAccountAllowedToCreateOfferingsWithTags", description = "Return true if the specified account is allowed to create offerings with tags.", - responseObject = IsAccountAllowedToCreateOfferingsWithTagsResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) + responseObject = IsAccountAllowedToCreateOfferingsWithTagsResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class IsAccountAllowedToCreateOfferingsWithTagsCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = AccountResponse.class, description = "Account UUID", required = true) diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java index 4b243f2e8a1..90b3b89d3fb 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java @@ -59,6 +59,10 @@ public class ApiDiscoveryResponse extends BaseResponse { @Param(description = "Response field type") private String type; + @SerializedName(ApiConstants.HTTP_REQUEST_TYPE) + @Param(description = "Preferred HTTP request type for the API", since = "4.23.0") + private String httpRequestType; + public ApiDiscoveryResponse() { params = new HashSet(); apiResponse = new HashSet(); @@ -74,6 +78,7 @@ public class ApiDiscoveryResponse extends BaseResponse { this.params = new HashSet<>(another.getParams()); this.apiResponse = new HashSet<>(another.getApiResponse()); this.type = another.getType(); + this.httpRequestType = another.getHttpRequestType(); this.setObjectName(another.getObjectName()); } @@ -140,4 +145,12 @@ public class ApiDiscoveryResponse extends BaseResponse { public String getType() { return type; } + + public String getHttpRequestType() { + return httpRequestType; + } + + public void setHttpRequestType(String httpRequestType) { + this.httpRequestType = httpRequestType; + } } diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java index 452b95cf2c0..d6d235162ef 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java @@ -50,6 +50,7 @@ import org.apache.commons.lang3.StringUtils; import org.reflections.ReflectionUtils; import org.springframework.stereotype.Component; +import com.cloud.api.ApiServlet; import com.cloud.exception.PermissionDeniedException; import com.cloud.serializer.Param; import com.cloud.user.Account; @@ -189,7 +190,7 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A return responseResponse; } - private ApiDiscoveryResponse getCmdRequestMap(Class cmdClass, APICommand apiCmdAnnotation) { + protected ApiDiscoveryResponse getCmdRequestMap(Class cmdClass, APICommand apiCmdAnnotation) { String apiName = apiCmdAnnotation.name(); ApiDiscoveryResponse response = new ApiDiscoveryResponse(); response.setName(apiName); @@ -197,6 +198,12 @@ public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements A if (!apiCmdAnnotation.since().isEmpty()) { response.setSince(apiCmdAnnotation.since()); } + String 
httpRequestType = apiCmdAnnotation.httpMethod(); + if (StringUtils.isBlank(httpRequestType)) { + httpRequestType = ApiServlet.GET_REQUEST_COMMANDS.matcher(apiName.toLowerCase()).matches() ? + "GET" : "POST"; + } + response.setHttpRequestType(httpRequestType); Set fields = ReflectUtil.getAllFieldsForClass(cmdClass, new Class[] {BaseCmd.class, BaseAsyncCmd.class, BaseAsyncCreateCmd.class}); diff --git a/plugins/api/discovery/src/test/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImplTest.java b/plugins/api/discovery/src/test/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImplTest.java new file mode 100644 index 00000000000..e69b9523d44 --- /dev/null +++ b/plugins/api/discovery/src/test/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImplTest.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.discovery; + +import static org.mockito.ArgumentMatchers.any; + +import java.lang.reflect.Field; +import java.util.Set; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.admin.account.CreateAccountCmd; +import org.apache.cloudstack.api.command.admin.user.GetUserCmd; +import org.apache.cloudstack.api.command.user.discovery.ListApisCmd; +import org.apache.cloudstack.api.response.ApiDiscoveryResponse; +import org.apache.cloudstack.api.response.ApiParameterResponse; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.utils.ReflectUtil; + +@RunWith(MockitoJUnitRunner.class) +public class ApiDiscoveryServiceImplTest { + + @Mock + APICommand apiCommandMock; + + @Spy + @InjectMocks + ApiDiscoveryServiceImpl discoveryServiceSpy; + + @Before + public void setUp() { + Mockito.when(apiCommandMock.name()).thenReturn("listApis"); + Mockito.when(apiCommandMock.since()).thenReturn(""); + } + + @Test + public void getCmdRequestMapReturnsResponseWithCorrectApiNameAndDescription() { + Mockito.when(apiCommandMock.description()).thenReturn("Lists all APIs"); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(ListApisCmd.class, apiCommandMock); + Assert.assertEquals("listApis", response.getName()); + Assert.assertEquals("Lists all APIs", response.getDescription()); + } + + @Test + public void getCmdRequestMapSetsHttpRequestTypeToGetWhenApiNameMatchesGetPattern() { + 
Mockito.when(apiCommandMock.name()).thenReturn("getUser"); + Mockito.when(apiCommandMock.httpMethod()).thenReturn(""); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(GetUserCmd.class, apiCommandMock); + Assert.assertEquals("GET", response.getHttpRequestType()); + } + + @Test + public void getCmdRequestMapSetsHttpRequestTypeToPostWhenApiNameDoesNotMatchGetPattern() { + Mockito.when(apiCommandMock.name()).thenReturn("createAccount"); + Mockito.when(apiCommandMock.httpMethod()).thenReturn(""); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(CreateAccountCmd.class, apiCommandMock); + Assert.assertEquals("POST", response.getHttpRequestType()); + } + + @Test + public void getCmdRequestMapSetsAsyncToTrueForAsyncCommand() { + Mockito.when(apiCommandMock.name()).thenReturn("asyncApi"); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(BaseAsyncCmd.class, apiCommandMock); + Assert.assertTrue(response.getAsync()); + } + + @Test + public void getCmdRequestMapDoesNotAddParamsWithoutParameterAnnotation() { + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(BaseCmd.class, apiCommandMock); + Assert.assertFalse(response.getParams().isEmpty()); + Assert.assertEquals(1, response.getParams().size()); + } + + @Test + public void getCmdRequestMapAddsParamsWithExposedAndIncludedInApiDocAnnotations() { + Field fieldMock = Mockito.mock(Field.class); + Parameter parameterMock = Mockito.mock(Parameter.class); + Mockito.when(parameterMock.expose()).thenReturn(true); + Mockito.when(parameterMock.includeInApiDoc()).thenReturn(true); + Mockito.when(parameterMock.name()).thenReturn("paramName"); + Mockito.when(parameterMock.since()).thenReturn(""); + Mockito.when(parameterMock.entityType()).thenReturn(new Class[]{Object.class}); + Mockito.when(parameterMock.description()).thenReturn("paramDescription"); + Mockito.when(parameterMock.type()).thenReturn(BaseCmd.CommandType.STRING); + Mockito.when(fieldMock.getAnnotation(Parameter.class)).thenReturn(parameterMock); + try (MockedStatic reflectUtilMockedStatic = Mockito.mockStatic(ReflectUtil.class)) { + reflectUtilMockedStatic.when(() -> ReflectUtil.getAllFieldsForClass(any(Class.class), any(Class[].class))) + .thenReturn(Set.of(fieldMock)); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(ListApisCmd.class, apiCommandMock); + Set params = response.getParams(); + Assert.assertEquals(1, params.size()); + ApiParameterResponse paramResponse = params.iterator().next(); + Assert.assertEquals("paramName", ReflectionTestUtils.getField(paramResponse, "name")); + } + } +} diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java index cf39f802d34..0cec0df6618 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java @@ -35,7 +35,8 @@ import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.quota.vo.QuotaBalanceVO; import org.apache.cloudstack.api.response.QuotaStatementItemResponse; -@APICommand(name = "quotaBalance", responseObject = QuotaStatementItemResponse.class, description = "Create a quota balance statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaBalance", responseObject = 
QuotaStatementItemResponse.class, description = "Create a quota balance statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaBalanceCmd extends BaseCmd { diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java index 4035a5205e6..af1d146ea9d 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java @@ -26,7 +26,8 @@ import org.apache.cloudstack.quota.QuotaService; import javax.inject.Inject; -@APICommand(name = "quotaIsEnabled", responseObject = QuotaEnabledResponse.class, description = "Return true if the plugin is enabled", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaIsEnabled", responseObject = QuotaEnabledResponse.class, description = "Return true if the plugin is enabled", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaEnabledCmd extends BaseCmd { diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java index 18f9bc48a6e..d3bd3868ed1 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java @@ -35,7 +35,8 @@ import org.apache.cloudstack.quota.vo.QuotaUsageVO; import com.cloud.user.Account; -@APICommand(name = "quotaStatement", responseObject = QuotaStatementItemResponse.class, description = "Create a quota statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaStatement", responseObject = QuotaStatementItemResponse.class, description = "Create a quota statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaStatementCmd extends BaseCmd { diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java index 42a598042b3..87322b01f4d 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java @@ -33,7 +33,8 @@ import java.util.List; import javax.inject.Inject; -@APICommand(name = "quotaSummary", responseObject = QuotaSummaryResponse.class, description = "Lists balance and quota usage for all Accounts", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaSummary", responseObject = QuotaSummaryResponse.class, description = "Lists balance and quota usage for all Accounts", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaSummaryCmd extends BaseListCmd { @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, required = false, description = "Optional, Account Id for which statement needs to be generated") diff --git 
a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java index d054d545931..e0bab07501b 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java @@ -38,7 +38,8 @@ import java.util.ArrayList; import java.util.Date; import java.util.List; -@APICommand(name = "quotaTariffList", responseObject = QuotaTariffResponse.class, description = "Lists all quota tariff plans", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaTariffList", responseObject = QuotaTariffResponse.class, description = "Lists all quota tariff plans", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaTariffListCmd extends BaseListCmd { @Inject diff --git a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/api/CloudianIsEnabledCmd.java b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/api/CloudianIsEnabledCmd.java index 56cb74e3cab..3c334ba55c2 100644 --- a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/api/CloudianIsEnabledCmd.java +++ b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/api/CloudianIsEnabledCmd.java @@ -31,7 +31,8 @@ import com.cloud.user.Account; responseObject = CloudianEnabledResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.11.0", - authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + httpMethod = "GET") public class CloudianIsEnabledCmd extends BaseCmd { @Inject diff --git a/plugins/maintenance/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java b/plugins/maintenance/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java index 782b23a0422..36ec4fff9c9 100644 --- a/plugins/maintenance/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java +++ b/plugins/maintenance/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java @@ -26,7 +26,8 @@ import com.cloud.user.Account; description = "Returns the status of CloudStack, whether a shutdown has been triggered and if ready to shutdown", since = "4.19.0", responseObject = ManagementServerMaintenanceResponse.class, - requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class ReadyForShutdownCmd extends BaseMSMaintenanceActionCmd { public static final String APINAME = "readyForShutdown"; diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java index bd49f87d627..b3d2d335ba2 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java @@ -20,8 +20,10 @@ import java.net.InetAddress; import 
java.util.List; import java.util.Map; -import com.cloud.api.response.ApiResponseSerializer; -import com.cloud.user.Account; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; + import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,13 +39,13 @@ import org.apache.cloudstack.oauth2.OAuth2AuthManager; import org.apache.cloudstack.oauth2.api.response.OauthProviderResponse; import org.apache.commons.lang.ArrayUtils; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpSession; +import com.cloud.api.response.ApiResponseSerializer; +import com.cloud.user.Account; @APICommand(name = "verifyOAuthCodeAndGetUser", description = "Verify the OAuth Code and fetch the corresponding user from provider", responseObject = OauthProviderResponse.class, entityType = {}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, - authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, since = "4.19.0") + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, since = "4.19.0", + httpMethod = "GET") public class VerifyOAuthCodeAndGetUserCmd extends BaseListCmd implements APIAuthenticator { ///////////////////////////////////////////////////// diff --git a/server/src/main/java/com/cloud/api/ApiServlet.java b/server/src/main/java/com/cloud/api/ApiServlet.java index db17daaf146..158df224071 100644 --- a/server/src/main/java/com/cloud/api/ApiServlet.java +++ b/server/src/main/java/com/cloud/api/ApiServlet.java @@ -25,8 +25,8 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.regex.Pattern; import java.util.Set; +import java.util.regex.Pattern; import javax.inject.Inject; import javax.servlet.ServletConfig; @@ -52,10 +52,9 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.managed.context.ManagedContext; import org.apache.cloudstack.utils.consoleproxy.ConsoleAccessUtils; import org.apache.commons.collections.MapUtils; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import org.apache.commons.lang3.EnumUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; import org.springframework.stereotype.Component; import org.springframework.web.context.support.SpringBeanAutowiringSupport; @@ -70,12 +69,12 @@ import com.cloud.user.AccountManagerImpl; import com.cloud.user.AccountService; import com.cloud.user.User; import com.cloud.user.UserAccount; - import com.cloud.utils.HttpUtils; -import com.cloud.utils.HttpUtils.ApiSessionKeySameSite; import com.cloud.utils.HttpUtils.ApiSessionKeyCheckOption; +import com.cloud.utils.HttpUtils.ApiSessionKeySameSite; import com.cloud.utils.StringUtils; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; @Component("apiServlet") @@ -84,9 +83,7 @@ public class ApiServlet extends HttpServlet { private static final Logger ACCESSLOGGER = LogManager.getLogger("apiserver." 
+ ApiServlet.class.getName()); private static final String REPLACEMENT = "_"; private static final String LOGGER_REPLACEMENTS = "[\n\r\t]"; - private static final Pattern GET_REQUEST_COMMANDS = Pattern.compile("^(get|list|query|find)(\\w+)+$"); - private static final HashSet GET_REQUEST_COMMANDS_LIST = new HashSet<>(Set.of("isaccountallowedtocreateofferingswithtags", - "readyforshutdown", "cloudianisenabled", "quotabalance", "quotasummary", "quotatarifflist", "quotaisenabled", "quotastatement", "verifyoauthcodeandgetuser")); + public static final Pattern GET_REQUEST_COMMANDS = Pattern.compile("^(get|list|query|find)(\\w+)+$"); private static final HashSet POST_REQUESTS_TO_DISABLE_LOGGING = new HashSet<>(Set.of( "login", "oauthlogin", @@ -367,7 +364,7 @@ public class ApiServlet extends HttpServlet { } } - if (apiServer.isPostRequestsAndTimestampsEnforced() && !isStateChangingCommandUsingPOST(command, req.getMethod(), params)) { + if (apiServer.isPostRequestsAndTimestampsEnforced() && isStateChangingCommandNotUsingPOST(command, req.getMethod(), params)) { String errorText = String.format("State changing command %s needs to be sent using POST request", command); if (command.equalsIgnoreCase("updateConfiguration") && params.containsKey("name")) { errorText = String.format("Changes for configuration %s needs to be sent using POST request", params.get("name")[0]); @@ -485,13 +482,32 @@ public class ApiServlet extends HttpServlet { return verify2FA; } - private boolean isStateChangingCommandUsingPOST(String command, String method, Map params) { - if (command == null || (!GET_REQUEST_COMMANDS.matcher(command.toLowerCase()).matches() && !GET_REQUEST_COMMANDS_LIST.contains(command.toLowerCase()) - && !command.equalsIgnoreCase("updateConfiguration") && !method.equals("POST"))) { + protected boolean isStateChangingCommandNotUsingPOST(String command, String method, Map params) { + if (BaseCmd.HTTPMethod.POST.toString().equalsIgnoreCase(method)) { return false; } - return !command.equalsIgnoreCase("updateConfiguration") || method.equals("POST") || (params.containsKey("name") - && params.get("name")[0].toString().equalsIgnoreCase(ApiServer.EnforcePostRequestsAndTimestamps.key())); + if (command == null || method == null) { + return true; + } + String commandHttpMethod = null; + try { + Class cmdClass = apiServer.getCmdClass(command); + if (cmdClass != null) { + APICommand at = cmdClass.getAnnotation(APICommand.class); + if (at != null && org.apache.commons.lang3.StringUtils.isNotBlank(at.httpMethod())) { + commandHttpMethod = at.httpMethod(); + } + } + } catch (CloudRuntimeException e) { + LOGGER.trace("Command class not found for {}; falling back to pattern match", command, e); + } + if (BaseCmd.HTTPMethod.GET.toString().equalsIgnoreCase(commandHttpMethod) || + GET_REQUEST_COMMANDS.matcher(command.toLowerCase()).matches()) { + return false; + } + return !command.equalsIgnoreCase("updateConfiguration") || + !params.containsKey("name") || + !ApiServer.EnforcePostRequestsAndTimestamps.key().equalsIgnoreCase(params.get("name")[0].toString()); } protected boolean skip2FAcheckForAPIs(String command) { diff --git a/server/src/test/java/com/cloud/api/ApiServletTest.java b/server/src/test/java/com/cloud/api/ApiServletTest.java index 79fe4b86f85..c5ee9f58154 100644 --- a/server/src/test/java/com/cloud/api/ApiServletTest.java +++ b/server/src/test/java/com/cloud/api/ApiServletTest.java @@ -16,36 +16,8 @@ // under the License. 
package com.cloud.api; -import com.cloud.api.auth.ListUserTwoFactorAuthenticatorProvidersCmd; -import com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; -import com.cloud.api.auth.ValidateUserTwoFactorAuthenticationCodeCmd; -import com.cloud.server.ManagementServer; -import com.cloud.user.Account; -import com.cloud.user.AccountManagerImpl; -import com.cloud.user.AccountService; -import com.cloud.user.User; -import com.cloud.user.UserAccount; -import com.cloud.utils.HttpUtils; -import com.cloud.vm.UserVmManager; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.auth.APIAuthenticationManager; -import org.apache.cloudstack.api.auth.APIAuthenticationType; -import org.apache.cloudstack.api.auth.APIAuthenticator; -import org.apache.cloudstack.api.command.admin.config.ListCfgsByCmd; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnitRunner; +import static org.mockito.ArgumentMatchers.nullable; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpSession; import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; @@ -57,11 +29,46 @@ import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map; -import static org.mockito.ArgumentMatchers.nullable; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.auth.APIAuthenticationManager; +import org.apache.cloudstack.api.auth.APIAuthenticationType; +import org.apache.cloudstack.api.auth.APIAuthenticator; +import org.apache.cloudstack.api.command.admin.config.ListCfgsByCmd; +import org.apache.cloudstack.api.command.admin.offering.IsAccountAllowedToCreateOfferingsWithTagsCmd; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.api.auth.ListUserTwoFactorAuthenticatorProvidersCmd; +import com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; +import com.cloud.api.auth.ValidateUserTwoFactorAuthenticationCodeCmd; +import com.cloud.server.ManagementServer; +import com.cloud.user.Account; +import com.cloud.user.AccountManagerImpl; +import com.cloud.user.AccountService; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.utils.HttpUtils; +import com.cloud.vm.UserVmManager; @RunWith(MockitoJUnitRunner.class) public class ApiServletTest { + private static final String[] STATE_CHANGING_COMMAND_CHECK_NAME_PARAM = + {ApiServer.EnforcePostRequestsAndTimestamps.key()}; + @Mock ApiServer apiServer; @@ -461,4 +468,88 @@ public class ApiServletTest { Assert.assertEquals(false, result); } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForPostMethod() { + String command = "updateConfiguration"; + String method = "POST"; + Map params = 
new HashMap<>(); + + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + + Assert.assertFalse(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsTrueForNullCommandAndMethod() { + String command = null; + String method = null; + Map params = new HashMap<>(); + + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + + Assert.assertTrue(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForGetHttpMethodAnnotation() { + String command = "isAccountAllowedToCreateOfferingsWithTags"; + String method = "GET"; + Map params = new HashMap<>(); + Class cmdClass = IsAccountAllowedToCreateOfferingsWithTagsCmd.class; + APICommand apiCommand = cmdClass.getAnnotation(APICommand.class); + Mockito.doReturn(cmdClass).when(apiServer).getCmdClass(command); + Assert.assertNotNull(apiCommand); + Assert.assertEquals("GET", apiCommand.httpMethod()); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertFalse(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForMatchingGetRequestPattern() { + String command = "listZones"; + String method = "GET"; + Map params = new HashMap<>(); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertFalse(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsTrueForMissingNameParameter() { + String command = "updateConfiguration"; + String method = "GET"; + Map params = new HashMap<>(); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertTrue(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForUpdateConfigurationEnforcePostRequestsKey() { + String command = "updateConfiguration"; + String method = "GET"; + Map params = new HashMap<>(); + params.put("name", STATE_CHANGING_COMMAND_CHECK_NAME_PARAM); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertFalse(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForWrongApiEnforcePostRequestsKey() { + String command = "updateSomeApi"; + String method = "GET"; + Map params = new HashMap<>(); + params.put("name", STATE_CHANGING_COMMAND_CHECK_NAME_PARAM); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertTrue(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForUpdateConfigurationNonEnforcePostRequestsKey() { + String command = "updateConfiguration"; + String method = "GET"; + Map params = new HashMap<>(); + params.put("name", new String[] { "key" }); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertTrue(result); + } } From 664f76c7e4b61489927abb6b5e859d79c47662a4 Mon Sep 17 00:00:00 2001 From: Nicolas Vazquez Date: Thu, 29 Jan 2026 05:24:58 -0300 Subject: [PATCH 097/126] Fix KvmSshToAgentEnabled setting description and make it dynamic (#12533) --- .../java/com/cloud/resource/ResourceManager.java | 4 ++-- .../resources/META-INF/db/schema-42020to42030.sql | 3 +++ .../com/cloud/resource/ResourceManagerImpl.java | 3 +-- .../cloud/resource/ResourceManagerImplTest.java | 14 +++++++++++--- 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java 
b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index 89dc4badcbc..936e8b3448e 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -51,8 +51,8 @@ public interface ResourceManager extends ResourceService, Configurable { ConfigKey KvmSshToAgentEnabled = new ConfigKey<>("Advanced", Boolean.class, "kvm.ssh.to.agent","true", - "Number of retries when preparing a host into Maintenance Mode is faulty before failing", - false); + "True if the management server will restart the agent service via SSH into the KVM hosts after or during maintenance operations", + true); ConfigKey HOST_MAINTENANCE_LOCAL_STRATEGY = new ConfigKey<>(String.class, "host.maintenance.local.storage.strategy", "Advanced","Error", diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql b/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql index 567e623564e..5eec97278ba 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42020to42030.sql @@ -23,3 +23,6 @@ ALTER TABLE `cloud`.`template_store_ref` MODIFY COLUMN `download_url` varchar(20 UPDATE `cloud`.`alert` SET type = 33 WHERE name = 'ALERT.VR.PUBLIC.IFACE.MTU'; UPDATE `cloud`.`alert` SET type = 34 WHERE name = 'ALERT.VR.PRIVATE.IFACE.MTU'; + +-- Update configuration 'kvm.ssh.to.agent' description and is_dynamic fields +UPDATE `cloud`.`configuration` SET description = 'True if the management server will restart the agent service via SSH into the KVM hosts after or during maintenance operations', is_dynamic = 1 WHERE name = 'kvm.ssh.to.agent'; diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index a77ecfcb7fe..c076ab7c893 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -2920,8 +2920,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (!isAgentOnHost || vmsMigrating || host.getStatus() == Status.Up) { return; } - final boolean sshToAgent = Boolean.parseBoolean(_configDao.getValue(KvmSshToAgentEnabled.key())); - if (sshToAgent) { + if (KvmSshToAgentEnabled.value()) { Ternary credentials = getHostCredentials(host); connectAndRestartAgentOnHost(host, credentials.first(), credentials.second(), credentials.third()); } else { diff --git a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java index 5b7353bded6..1669d7a47d9 100644 --- a/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java +++ b/server/src/test/java/com/cloud/resource/ResourceManagerImplTest.java @@ -45,6 +45,7 @@ import com.cloud.vm.dao.VMInstanceDao; import com.trilead.ssh2.Connection; import org.apache.cloudstack.api.command.admin.host.CancelHostAsDegradedCmd; import org.apache.cloudstack.api.command.admin.host.DeclareHostAsDegradedCmd; +import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.junit.After; import org.junit.Assert; @@ -61,6 +62,7 @@ import org.mockito.MockitoAnnotations; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; +import java.lang.reflect.Field; import java.util.ArrayList; import 
java.util.Arrays;
 import java.util.Collections;
@@ -152,6 +154,12 @@ public class ResourceManagerImplTest {
     private MockedConstruction getVncPortCommandMockedConstruction;
     private AutoCloseable closeable;
+    private void overrideDefaultConfigValue(final ConfigKey configKey, final String name, final Object o) throws IllegalAccessException, NoSuchFieldException {
+        Field f = ConfigKey.class.getDeclaredField(name);
+        f.setAccessible(true);
+        f.set(configKey, o);
+    }
+
     @Before
     public void setup() throws Exception {
         closeable = MockitoAnnotations.openMocks(this);
@@ -194,7 +202,7 @@ public class ResourceManagerImplTest {
                 eq("service cloudstack-agent restart"))).
                 willReturn(new SSHCmdHelper.SSHCmdResult(0,"",""));
-        when(configurationDao.getValue(ResourceManager.KvmSshToAgentEnabled.key())).thenReturn("true");
+        overrideDefaultConfigValue(ResourceManager.KvmSshToAgentEnabled, "_defaultValue", "true");
         rootDisks = Arrays.asList(rootDisk1, rootDisk2);
         dataDisks = Collections.singletonList(dataDisk);
@@ -372,9 +380,9 @@ public class ResourceManagerImplTest {
     }
     @Test(expected = CloudRuntimeException.class)
-    public void testHandleAgentSSHDisabledNotConnectedAgent() {
+    public void testHandleAgentSSHDisabledNotConnectedAgent() throws NoSuchFieldException, IllegalAccessException {
        when(host.getStatus()).thenReturn(Status.Disconnected);
-        when(configurationDao.getValue(ResourceManager.KvmSshToAgentEnabled.key())).thenReturn("false");
+        overrideDefaultConfigValue(ResourceManager.KvmSshToAgentEnabled, "_defaultValue", "false");
         resourceManager.handleAgentIfNotConnected(host, false);
     }
From 26b57655ecea10d65689084a9bf7a2285b744697 Mon Sep 17 00:00:00 2001
From: Suresh Kumar Anaparti
Date: Thu, 29 Jan 2026 13:59:41 +0530
Subject: [PATCH 098/126] Deployment plan fixes for VM with last host, and last host in maintenance (#12062)
* Deployment plan fixes for VM with last host
 - Consider last host when it is not in maintenance
 - Fail deployment when the user requests last host consideration and the last host doesn't exist or is in maintenance
* changes
* msg update with vm/host name
* address comments
* Exclude last hosts with error or degraded state as well, for vm deploy
* review changes
---
 .../deploy/DeploymentPlanningManagerImpl.java | 80 ++++++++++---------
 .../cloud/ha/HighAvailabilityManagerImpl.java | 4 +-
 .../cloud/resource/ResourceManagerImpl.java | 2 +-
 3 files changed, 45 insertions(+), 41 deletions(-)
diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java
index e7b926eb4e4..6881fbab98c 100644
--- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java
+++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java
@@ -36,6 +36,7 @@ import java.util.stream.Collectors;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
+import com.cloud.resource.ResourceState;
 import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO;
 import org.apache.cloudstack.affinity.AffinityGroupProcessor;
 import org.apache.cloudstack.affinity.AffinityGroupService;
@@ -378,22 +379,12 @@ StateListener, Configurable {
             planner = getDeploymentPlannerByName(plannerName);
         }
-        Host lastHost = null;
-
-        String considerLastHostStr = (String)vmProfile.getParameter(VirtualMachineProfile.Param.ConsiderLastHost);
-        boolean considerLastHost = vm.getLastHostId() != null && haVmTag == null &&
-                (considerLastHostStr == null ||
Boolean.TRUE.toString().equalsIgnoreCase(considerLastHostStr)); - if (considerLastHost) { - HostVO host = _hostDao.findById(vm.getLastHostId()); - logger.debug("This VM has last host_id specified, trying to choose the same host: " + host); - lastHost = host; - - DeployDestination deployDestination = deployInVmLastHost(vmProfile, plan, avoids, planner, vm, dc, offering, cpuRequested, ramRequested, volumesRequireEncryption); - if (deployDestination != null) { - return deployDestination; - } + DeployDestination deployDestinationForVmLasthost = deployInVmLastHost(vmProfile, plan, avoids, planner, vm, dc, offering, cpuRequested, ramRequested, volumesRequireEncryption); + if (deployDestinationForVmLasthost != null) { + return deployDestinationForVmLasthost; } + HostVO lastHost = _hostDao.findById(vm.getLastHostId()); avoidOtherClustersForDeploymentIfMigrationDisabled(vm, lastHost, avoids); DeployDestination dest = null; @@ -475,47 +466,56 @@ StateListener, Configurable { private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, DeploymentPlanner planner, VirtualMachine vm, DataCenter dc, ServiceOffering offering, int cpuRequested, long ramRequested, boolean volumesRequireEncryption) throws InsufficientServerCapacityException { - HostVO host = _hostDao.findById(vm.getLastHostId()); - if (canUseLastHost(host, avoids, plan, vm, offering, volumesRequireEncryption)) { - _hostDao.loadHostTags(host); - _hostDao.loadDetails(host); - if (host.getStatus() != Status.Up) { + String considerLastHostStr = (String)vmProfile.getParameter(VirtualMachineProfile.Param.ConsiderLastHost); + String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); + boolean considerLastHost = vm.getLastHostId() != null && haVmTag == null && + !(Boolean.FALSE.toString().equalsIgnoreCase(considerLastHostStr)); + if (!considerLastHost) { + return null; + } + + logger.debug("This VM has last host_id: {}", vm.getLastHostId()); + HostVO lastHost = _hostDao.findById(vm.getLastHostId()); + if (canUseLastHost(lastHost, avoids, plan, vm, offering, volumesRequireEncryption)) { + _hostDao.loadHostTags(lastHost); + _hostDao.loadDetails(lastHost); + if (lastHost.getStatus() != Status.Up) { logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host is not in UP state or is not enabled. 
Host current status [{}] and resource status [{}].", - vm, host, host.getState().name(), host.getResourceState()); + vm, lastHost, lastHost.getState().name(), lastHost.getResourceState()); return null; } - if (checkVmProfileAndHost(vmProfile, host)) { - long cluster_id = host.getClusterId(); + if (checkVmProfileAndHost(vmProfile, lastHost)) { + long cluster_id = lastHost.getClusterId(); ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); boolean hostHasCpuCapability, hostHasCapacity = false; - hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host, offering.getCpu(), offering.getSpeed()); + hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(lastHost, offering.getCpu(), offering.getSpeed()); if (hostHasCpuCapability) { // first check from reserved capacity - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host, cpuRequested, ramRequested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(lastHost, cpuRequested, ramRequested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); // if not reserved, check the free capacity if (!hostHasCapacity) - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host, cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(lastHost, cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); } boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); if (!hostHasCapacity || !hostHasCpuCapability) { - logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host does not have enough capacity to deploy this VM.", vm, host); + logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host does not have enough capacity to deploy this VM.", vm, lastHost); return null; } - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); + Pod pod = _podDao.findById(lastHost.getPodId()); + Cluster cluster = _clusterDao.findById(lastHost.getClusterId()); logger.debug("Last host [{}] of VM [{}] is UP and has enough capacity. Checking for suitable pools for this host under zone [{}], pod [{}] and cluster [{}].", - host, vm, dc, pod, cluster); + lastHost, vm, dc, pod, cluster); if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<>(), displayStorage); + DeployDestination dest = new DeployDestination(dc, pod, cluster, lastHost, new HashMap<>(), displayStorage); logger.debug("Returning Deployment Destination: {}.", dest); return dest; } @@ -523,8 +523,8 @@ StateListener, Configurable { // search for storage under the zone, pod, cluster // of // the last host. 
- DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), - host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); + DataCenterDeployment lastPlan = new DataCenterDeployment(lastHost.getDataCenterId(), + lastHost.getPodId(), lastHost.getClusterId(), lastHost.getId(), plan.getPoolId(), null); Pair>, List> result = findSuitablePoolsForVolumes( vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); Map> suitableVolumeStoragePools = result.first(); @@ -533,11 +533,11 @@ StateListener, Configurable { // choose the potential pool for this VM for this // host if (suitableVolumeStoragePools.isEmpty()) { - logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host, vm); + logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", lastHost, vm); return null; } List suitableHosts = new ArrayList<>(); - suitableHosts.add(host); + suitableHosts.add(lastHost); Pair> potentialResources = findPotentialDeploymentResources( suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); @@ -550,7 +550,7 @@ StateListener, Configurable { for (Volume vol : readyAndReusedVolumes) { storageVolMap.remove(vol); } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap, displayStorage); + DeployDestination dest = new DeployDestination(dc, pod, cluster, lastHost, storageVolMap, displayStorage); logger.debug("Returning Deployment Destination: {}", dest); return dest; } @@ -562,7 +562,7 @@ StateListener, Configurable { private boolean canUseLastHost(HostVO host, ExcludeList avoids, DeploymentPlan plan, VirtualMachine vm, ServiceOffering offering, boolean volumesRequireEncryption) { if (host == null) { - logger.warn("Could not find last host of VM [{}] with id [{}]. Skipping this and trying other available hosts.", vm, vm.getLastHostId()); + logger.warn("Could not find last host of VM [{}] with id [{}]. Skipping it", vm, vm.getLastHostId()); return false; } @@ -576,6 +576,12 @@ StateListener, Configurable { return false; } + logger.debug("VM's last host is {}, trying to choose the same host if it is not in maintenance, error or degraded state", host); + if (host.isInMaintenanceStates() || Arrays.asList(ResourceState.Error, ResourceState.Degraded).contains(host.getResourceState())) { + logger.debug("Unable to deploy VM {} in the last host, last host {} is in {} state", vm.getName(), host.getName(), host.getResourceState()); + return false; + } + if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { logger.debug("Cannot deploy VM [{}] in the last host [{}] because this host already has the max number of running VMs (users and system VMs). 
Skipping this and trying other available hosts.", vm, host); @@ -1474,7 +1480,7 @@ StateListener, Configurable { protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools, ExcludeList avoid, PlannerResourceUsage resourceUsageRequired, List readyAndReusedVolumes, List preferredHosts, VirtualMachine vm) { - logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); + logger.debug("Trying to find a potential host and associated storage pools from the suitable host/pool lists for this VM"); boolean hostCanAccessPool = false; boolean haveEnoughSpace = false; diff --git a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java index b0f3e6d8d69..e3f67420a2a 100644 --- a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -833,7 +833,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur if (checkAndCancelWorkIfNeeded(work)) { return null; } - logger.info("Migration attempt: for VM {}from host {}. Starting attempt: {}/{} times.", vm, srcHost, 1 + work.getTimesTried(), _maxRetries); + logger.info("Migration attempt: for {} from {}. Starting attempt: {}/{} times.", vm, srcHost, 1 + work.getTimesTried(), _maxRetries); if (VirtualMachine.State.Stopped.equals(vm.getState())) { logger.info(String.format("vm %s is Stopped, skipping migrate.", vm)); @@ -843,8 +843,6 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur logger.info(String.format("VM %s is running on a different host %s, skipping migration", vm, vm.getHostId())); return null; } - logger.info("Migration attempt: for VM " + vm.getUuid() + "from host id " + srcHostId + - ". Starting attempt: " + (1 + work.getTimesTried()) + "/" + _maxRetries + " times."); try { work.setStep(Step.Migrating); diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index c076ab7c893..12ceac21322 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -1417,7 +1417,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new CloudRuntimeException("There are active VMs using the host's local storage pool. 
Please stop all VMs on this host that use local storage."); } } else { - logger.info("Maintenance: scheduling migration of VM {} from host {}", vm, host); + logger.info("Maintenance: scheduling migration of {} from {}", vm, host); _haMgr.scheduleMigration(vm, HighAvailabilityManager.ReasonType.HostMaintenance); } } From c681d0d0a2f53a7ef52de5cc2a4bb0d7d2aa864a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bernardo=20De=20Marco=20Gon=C3=A7alves?= Date: Thu, 29 Jan 2026 06:01:54 -0300 Subject: [PATCH 099/126] Change `vmsnapshot.max` setting scope to the account level (#11616) --- .../com/cloud/vm/snapshot/VMSnapshotManager.java | 2 +- .../cloud/vm/snapshot/VMSnapshotManagerImpl.java | 7 ++++--- .../cloud/vm/snapshot/VMSnapshotManagerTest.java | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java b/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java index 6478469f190..6831552b83d 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java +++ b/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java @@ -31,7 +31,7 @@ public interface VMSnapshotManager extends VMSnapshotService, Manager { static final ConfigKey VMSnapshotExpireInterval = new ConfigKey("Advanced", Integer.class, "vmsnapshot.expire.interval", "-1", "VM Snapshot expire interval in hours", true, ConfigKey.Scope.Account); - ConfigKey VMSnapshotMax = new ConfigKey("Advanced", Integer.class, "vmsnapshot.max", "10", "Maximum vm snapshots for a single vm", true, ConfigKey.Scope.Global); + ConfigKey VMSnapshotMax = new ConfigKey("Advanced", Integer.class, "vmsnapshot.max", "10", "Maximum VM snapshots for a single VM", true, ConfigKey.Scope.Account); /** * Delete all VM snapshots belonging to one VM diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index 617a4e54a6e..d5e25adcd5f 100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -400,10 +400,11 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme _accountMgr.checkAccess(caller, null, true, userVmVo); // check max snapshot limit for per VM - int vmSnapshotMax = VMSnapshotManager.VMSnapshotMax.value(); - + boolean vmBelongsToProject = _accountMgr.getAccount(userVmVo.getAccountId()).getType() == Account.Type.PROJECT; + long accountIdToRetrieveConfigurationValueFrom = vmBelongsToProject ? caller.getId() : userVmVo.getAccountId(); + int vmSnapshotMax = VMSnapshotManager.VMSnapshotMax.valueIn(accountIdToRetrieveConfigurationValueFrom); if (_vmSnapshotDao.findByVm(vmId).size() >= vmSnapshotMax) { - throw new CloudRuntimeException("Creating Instance Snapshot failed due to a Instance can just have : " + vmSnapshotMax + " Instance Snapshots. 
Please delete old ones"); + throw new CloudRuntimeException(String.format("Each VM can have at most [%s] VM snapshots.", vmSnapshotMax)); } // check if there are active volume snapshots tasks diff --git a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java index a0f09981a40..b696d743ac6 100644 --- a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java +++ b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java @@ -41,6 +41,7 @@ import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; @@ -136,6 +137,8 @@ public class VMSnapshotManagerTest { VMSnapshotDetailsDao _vmSnapshotDetailsDao; @Mock UserVmManager _userVmManager; + @Mock + private AccountVO accountVOMock; private static final long TEST_VM_ID = 3L; private static final long SERVICE_OFFERING_ID = 1L; @@ -285,8 +288,12 @@ public class VMSnapshotManagerTest { @SuppressWarnings("unchecked") @Test(expected = CloudRuntimeException.class) public void testAllocVMSnapshotF4() throws ResourceAllocationException { + long accountId = 1L; List mockList = mock(List.class); when(mockList.size()).thenReturn(10); + when(_userVMDao.findById(TEST_VM_ID)).thenReturn(vmMock); + when(userVm.getAccountId()).thenReturn(accountId); + when(_accountMgr.getAccount(accountId)).thenReturn(accountVOMock); when(_vmSnapshotDao.findByVm(TEST_VM_ID)).thenReturn(mockList); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID, "", "", true); } @@ -295,8 +302,12 @@ public class VMSnapshotManagerTest { @SuppressWarnings("unchecked") @Test(expected = CloudRuntimeException.class) public void testAllocVMSnapshotF5() throws ResourceAllocationException { + long accountId = 1L; List mockList = mock(List.class); when(mockList.size()).thenReturn(1); + when(_userVMDao.findById(TEST_VM_ID)).thenReturn(vmMock); + when(userVm.getAccountId()).thenReturn(accountId); + when(_accountMgr.getAccount(accountId)).thenReturn(accountVOMock); when(_snapshotDao.listByInstanceId(TEST_VM_ID, Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp)).thenReturn(mockList); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID, "", "", true); } @@ -304,6 +315,10 @@ public class VMSnapshotManagerTest { // successful creation case @Test public void testCreateVMSnapshot() throws AgentUnavailableException, OperationTimedoutException, ResourceAllocationException, NoTransitionException { + long accountId = 1L; + when(_userVMDao.findById(TEST_VM_ID)).thenReturn(vmMock); + when(userVm.getAccountId()).thenReturn(accountId); + when(_accountMgr.getAccount(accountId)).thenReturn(accountVOMock); when(vmMock.getState()).thenReturn(State.Running); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID, "", "", true); } From 6ba5e082217ab7af0ee5042bfdff1053fad41430 Mon Sep 17 00:00:00 2001 From: Rene Peinthor Date: Thu, 29 Jan 2026 10:08:12 +0100 Subject: [PATCH 100/126] Linstor: support live migration from other primary storage (#12532) * Linstor: Refactor resource creation methods to LinstorUtil Move reusable methods from LinstorPrimaryDataStoreDriverImpl to LinstorUtil to enable sharing with other components: - logLinstorAnswer, logLinstorAnswers, checkLinstorAnswersThrow - getRscGrp, getEncryptedLayerList, applyQoSSettings - createResourceBase, createResource, spawnResource 
- canShareTemplateForResourceGroup, foundShareableTemplate Add LIN_PROP_DRBDOPT_EXACT_SIZE constant and exactSize parameter support for DRBD exact-size property handling during resource creation. * Linstor: Add LinstorDataMotionStrategy for VM live migration Implement DataMotionStrategy for live migration of VMs with volumes on Linstor or other primary storage. Key features: - Support live migration with storage from other primary storages - Preserve DRBD exact-size property during migration --- plugins/storage/volume/linstor/CHANGELOG.md | 6 + .../LinstorPrimaryDataStoreDriverImpl.java | 316 +------------ .../storage/datastore/util/LinstorUtil.java | 293 +++++++++++- .../motion/LinstorDataMotionStrategy.java | 437 ++++++++++++++++++ .../spring-storage-volume-linstor-context.xml | 2 + ...LinstorPrimaryDataStoreDriverImplTest.java | 7 +- 6 files changed, 764 insertions(+), 297 deletions(-) create mode 100644 plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/motion/LinstorDataMotionStrategy.java diff --git a/plugins/storage/volume/linstor/CHANGELOG.md b/plugins/storage/volume/linstor/CHANGELOG.md index 47d1ddeb06c..a5d609325d6 100644 --- a/plugins/storage/volume/linstor/CHANGELOG.md +++ b/plugins/storage/volume/linstor/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to Linstor CloudStack plugin will be documented in this file The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2026-01-17] + +### Added + +- Support live migrate from other primary storage + ## [2025-12-18] ### Changed diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java index c2bce6e5a04..27b5f7ef7ec 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java @@ -21,33 +21,25 @@ import com.linbit.linstor.api.CloneWaiter; import com.linbit.linstor.api.DevelopersApi; import com.linbit.linstor.api.model.ApiCallRc; import com.linbit.linstor.api.model.ApiCallRcList; -import com.linbit.linstor.api.model.AutoSelectFilter; import com.linbit.linstor.api.model.LayerType; -import com.linbit.linstor.api.model.Properties; import com.linbit.linstor.api.model.ResourceDefinition; import com.linbit.linstor.api.model.ResourceDefinitionCloneRequest; import com.linbit.linstor.api.model.ResourceDefinitionCloneStarted; import com.linbit.linstor.api.model.ResourceDefinitionCreate; import com.linbit.linstor.api.model.ResourceDefinitionModify; -import com.linbit.linstor.api.model.ResourceGroup; -import com.linbit.linstor.api.model.ResourceGroupSpawn; import com.linbit.linstor.api.model.ResourceMakeAvailable; import com.linbit.linstor.api.model.ResourceWithVolumes; import com.linbit.linstor.api.model.Snapshot; import com.linbit.linstor.api.model.SnapshotRestore; -import com.linbit.linstor.api.model.VolumeDefinition; import com.linbit.linstor.api.model.VolumeDefinitionModify; import javax.annotation.Nonnull; -import javax.annotation.Nullable; import javax.inject.Inject; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import 
java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.stream.Collectors; @@ -117,10 +109,9 @@ import org.apache.cloudstack.storage.snapshot.SnapshotObject; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.volume.VolumeObject; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import org.apache.commons.collections.CollectionUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.nio.charset.StandardCharsets; @@ -335,275 +326,11 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } } - private void logLinstorAnswer(@Nonnull ApiCallRc answer) { - if (answer.isError()) { - logger.error(answer.getMessage()); - } else if (answer.isWarning()) { - logger.warn(answer.getMessage()); - } else if (answer.isInfo()) { - logger.info(answer.getMessage()); - } - } - - private void logLinstorAnswers(@Nonnull ApiCallRcList answers) { - answers.forEach(this::logLinstorAnswer); - } - - private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) { - logLinstorAnswers(answers); - if (answers.hasError()) - { - String errMsg = answers.stream() - .filter(ApiCallRc::isError) - .findFirst() - .map(ApiCallRc::getMessage).orElse("Unknown linstor error"); - throw new CloudRuntimeException(errMsg); - } - } - private String checkLinstorAnswers(@Nonnull ApiCallRcList answers) { - logLinstorAnswers(answers); + LinstorUtil.logLinstorAnswers(answers); return answers.stream().filter(ApiCallRc::isError).findFirst().map(ApiCallRc::getMessage).orElse(null); } - private void applyQoSSettings(StoragePoolVO storagePool, DevelopersApi api, String rscName, Long maxIops) - throws ApiException - { - Long currentQosIops = null; - List vlmDfns = api.volumeDefinitionList(rscName, null, null); - if (!vlmDfns.isEmpty()) - { - Properties props = vlmDfns.get(0).getProps(); - long iops = Long.parseLong(props.getOrDefault("sys/fs/blkio_throttle_write_iops", "0")); - currentQosIops = iops > 0 ? iops : null; - } - - if (!Objects.equals(maxIops, currentQosIops)) - { - VolumeDefinitionModify vdm = new VolumeDefinitionModify(); - if (maxIops != null) - { - Properties props = new Properties(); - props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops); - props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops); - vdm.overrideProps(props); - logger.info("Apply qos setting: " + maxIops + " to " + rscName); - } - else - { - logger.info("Remove QoS setting for " + rscName); - vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops")); - } - ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm); - checkLinstorAnswersThrow(answers); - - Long capacityIops = storagePool.getCapacityIops(); - if (capacityIops != null) - { - long vcIops = currentQosIops != null ? currentQosIops * -1 : 0; - long vMaxIops = maxIops != null ? maxIops : 0; - long newIops = vcIops + vMaxIops; - capacityIops -= newIops; - logger.info(String.format("Current storagepool %s iops capacity: %d", storagePool, capacityIops)); - storagePool.setCapacityIops(Math.max(0, capacityIops)); - _storagePoolDao.update(storagePool.getId(), storagePool); - } - } - } - - private String getRscGrp(StoragePool storagePool) { - return storagePool.getUserInfo() != null && !storagePool.getUserInfo().isEmpty() ? 
- storagePool.getUserInfo() : "DfltRscGrp"; - } - - /** - * Returns the layerlist of the resourceGroup with encryption(LUKS) added above STORAGE. - * If the resourceGroup layer list already contains LUKS this layer list will be returned. - * @param api Linstor developers API - * @param resourceGroup Resource group to get the encryption layer list - * @return layer list with LUKS added - */ - public List getEncryptedLayerList(DevelopersApi api, String resourceGroup) { - try { - List rscGrps = api.resourceGroupList( - Collections.singletonList(resourceGroup), Collections.emptyList(), null, null); - - if (CollectionUtils.isEmpty(rscGrps)) { - throw new CloudRuntimeException( - String.format("Resource Group %s not found on Linstor cluster.", resourceGroup)); - } - - final ResourceGroup rscGrp = rscGrps.get(0); - List layers = Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE); - List curLayerStack = rscGrp.getSelectFilter() != null ? - rscGrp.getSelectFilter().getLayerStack() : Collections.emptyList(); - if (CollectionUtils.isNotEmpty(curLayerStack)) { - layers = curLayerStack.stream().map(LayerType::valueOf).collect(Collectors.toList()); - if (!layers.contains(LayerType.LUKS)) { - layers.add(layers.size() - 1, LayerType.LUKS); // lowest layer is STORAGE - } - } - return layers; - } catch (ApiException e) { - throw new CloudRuntimeException( - String.format("Resource Group %s not found on Linstor cluster.", resourceGroup)); - } - } - - /** - * Spawns a new Linstor resource with the given arguments. - * @param api - * @param newRscName - * @param sizeInBytes - * @param isTemplate - * @param rscGrpName - * @param volName - * @param vmName - * @throws ApiException - */ - private void spawnResource( - DevelopersApi api, String newRscName, long sizeInBytes, boolean isTemplate, String rscGrpName, - String volName, String vmName, @Nullable Long passPhraseId, @Nullable byte[] passPhrase) throws ApiException - { - ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn(); - rscGrpSpawn.setResourceDefinitionName(newRscName); - rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024); - if (passPhraseId != null) { - AutoSelectFilter asf = new AutoSelectFilter(); - List luksLayers = getEncryptedLayerList(api, rscGrpName); - asf.setLayerStack(luksLayers.stream().map(LayerType::toString).collect(Collectors.toList())); - rscGrpSpawn.setSelectFilter(asf); - if (passPhrase != null) { - String utf8Passphrase = new String(passPhrase, StandardCharsets.UTF_8); - rscGrpSpawn.setVolumePassphrases(Collections.singletonList(utf8Passphrase)); - } - } - - if (isTemplate) { - Properties props = new Properties(); - props.put(LinstorUtil.getTemplateForAuxPropKey(rscGrpName), "true"); - rscGrpSpawn.setResourceDefinitionProps(props); - } - - logger.info("Linstor: Spawn resource " + newRscName); - ApiCallRcList answers = api.resourceGroupSpawn(rscGrpName, rscGrpSpawn); - checkLinstorAnswersThrow(answers); - - answers = LinstorUtil.applyAuxProps(api, newRscName, volName, vmName); - checkLinstorAnswersThrow(answers); - } - - /** - * Condition if a template resource can be shared with the given resource group. - * @param tgtRscGrp - * @param tgtLayerStack - * @param rg - * @return True if the template resource can be shared, else false. - */ - private boolean canShareTemplateForResourceGroup( - ResourceGroup tgtRscGrp, List tgtLayerStack, ResourceGroup rg) { - List rgLayerStack = rg.getSelectFilter() != null ? 
- rg.getSelectFilter().getLayerStack() : null; - return Objects.equals(tgtLayerStack, rgLayerStack) && - Objects.equals(tgtRscGrp.getSelectFilter().getStoragePoolList(), - rg.getSelectFilter().getStoragePoolList()); - } - - /** - * Searches for a shareable template for this rscGrpName and sets the aux template property. - * @param api - * @param rscName - * @param rscGrpName - * @param existingRDs - * @return - * @throws ApiException - */ - private boolean foundShareableTemplate( - DevelopersApi api, String rscName, String rscGrpName, - List> existingRDs) throws ApiException { - if (!existingRDs.isEmpty()) { - ResourceGroup tgtRscGrp = api.resourceGroupList( - Collections.singletonList(rscGrpName), null, null, null).get(0); - List tgtLayerStack = tgtRscGrp.getSelectFilter() != null ? - tgtRscGrp.getSelectFilter().getLayerStack() : null; - - // check if there is already a template copy, that we could reuse - // this means if select filters are similar enough to allow cloning from - for (Pair rdPair : existingRDs) { - ResourceGroup rg = rdPair.second(); - if (canShareTemplateForResourceGroup(tgtRscGrp, tgtLayerStack, rg)) { - LinstorUtil.setAuxTemplateForProperty(api, rscName, rscGrpName); - return true; - } - } - } - return false; - } - - /** - * Creates a new Linstor resource. - * @param rscName - * @param sizeInBytes - * @param volName - * @param vmName - * @param api - * @param rscGrp - * @param poolId - * @param isTemplate indicates if the resource is a template - * @return true if a new resource was created, false if it already existed or was reused. - */ - private boolean createResourceBase( - String rscName, long sizeInBytes, String volName, String vmName, - @Nullable Long passPhraseId, @Nullable byte[] passPhrase, DevelopersApi api, - String rscGrp, long poolId, boolean isTemplate) - { - try - { - logger.debug("createRscBase: {} :: {} :: {}", rscName, rscGrp, isTemplate); - List> existingRDs = LinstorUtil.getRDAndRGListStartingWith(api, rscName); - - String fullRscName = String.format("%s-%d", rscName, poolId); - boolean alreadyCreated = existingRDs.stream() - .anyMatch(p -> p.first().getName().equalsIgnoreCase(fullRscName)) || - existingRDs.stream().anyMatch(p -> p.first().getProps().containsKey(LinstorUtil.getTemplateForAuxPropKey(rscGrp))); - if (!alreadyCreated) { - boolean createNewRsc = !foundShareableTemplate(api, rscName, rscGrp, existingRDs); - if (createNewRsc) { - String newRscName = existingRDs.isEmpty() ? 
rscName : fullRscName; - spawnResource(api, newRscName, sizeInBytes, isTemplate, rscGrp, - volName, vmName, passPhraseId, passPhrase); - } - return createNewRsc; - } - return false; - } catch (ApiException apiEx) - { - logger.error("Linstor: ApiEx - " + apiEx.getMessage()); - throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); - } - } - - private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO) { - DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress()); - final String rscGrp = getRscGrp(storagePoolVO); - - final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid(); - createResourceBase( - rscName, vol.getSize(), vol.getName(), vol.getAttachedVmName(), vol.getPassphraseId(), vol.getPassphrase(), - linstorApi, rscGrp, storagePoolVO.getId(), false); - - try - { - applyQoSSettings(storagePoolVO, linstorApi, rscName, vol.getMaxIops()); - - return LinstorUtil.getDevicePath(linstorApi, rscName); - } catch (ApiException apiEx) - { - logger.error("Linstor: ApiEx - " + apiEx.getMessage()); - throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); - } - } - private void resizeResource(DevelopersApi api, String resourceName, long sizeByte) throws ApiException { VolumeDefinitionModify dfm = new VolumeDefinitionModify(); dfm.setSizeKib(sizeByte / 1024); @@ -688,13 +415,14 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver try { ResourceDefinition templateRD = LinstorUtil.findResourceDefinition( - linstorApi, templateRscName, getRscGrp(storagePoolVO)); + linstorApi, templateRscName, LinstorUtil.getRscGrp(storagePoolVO)); final String cloneRes = templateRD != null ? templateRD.getName() : templateRscName; logger.info("Clone resource definition {} to {}", cloneRes, rscName); ResourceDefinitionCloneRequest cloneRequest = new ResourceDefinitionCloneRequest(); cloneRequest.setName(rscName); if (volumeInfo.getPassphraseId() != null) { - List encryptionLayer = getEncryptedLayerList(linstorApi, getRscGrp(storagePoolVO)); + List encryptionLayer = LinstorUtil.getEncryptedLayerList( + linstorApi, LinstorUtil.getRscGrp(storagePoolVO)); cloneRequest.setLayerList(encryptionLayer); if (volumeInfo.getPassphrase() != null) { String utf8Passphrase = new String(volumeInfo.getPassphrase(), StandardCharsets.UTF_8); @@ -704,7 +432,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver ResourceDefinitionCloneStarted cloneStarted = linstorApi.resourceDefinitionClone( cloneRes, cloneRequest); - checkLinstorAnswersThrow(cloneStarted.getMessages()); + LinstorUtil.checkLinstorAnswersThrow(cloneStarted.getMessages()); if (!CloneWaiter.waitFor(linstorApi, cloneStarted)) { throw new CloudRuntimeException("Clone for resource " + rscName + " failed."); @@ -716,11 +444,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver resizeResource(linstorApi, rscName, volumeInfo.getSize()); } - updateRscGrpIfNecessary(linstorApi, rscName, getRscGrp(storagePoolVO)); + updateRscGrpIfNecessary(linstorApi, rscName, LinstorUtil.getRscGrp(storagePoolVO)); deleteTemplateForProps(linstorApi, rscName); LinstorUtil.applyAuxProps(linstorApi, rscName, volumeInfo.getName(), volumeInfo.getAttachedVmName()); - applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops()); + LinstorUtil.applyQoSSettings( + _storagePoolDao, storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops()); return LinstorUtil.getDevicePath(linstorApi, rscName); } catch (ApiException apiEx) { @@ -744,7 
+473,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } private String createResourceFromSnapshot(long csSnapshotId, String rscName, StoragePoolVO storagePoolVO) { - final String rscGrp = getRscGrp(storagePoolVO); + final String rscGrp = LinstorUtil.getRscGrp(storagePoolVO); final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress()); SnapshotVO snapshotVO = _snapshotDao.findById(csSnapshotId); @@ -757,22 +486,22 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver logger.debug("Create new resource definition: " + rscName); ResourceDefinitionCreate rdCreate = createResourceDefinitionCreate(rscName, rscGrp); ApiCallRcList answers = linstorApi.resourceDefinitionCreate(rdCreate); - checkLinstorAnswersThrow(answers); + LinstorUtil.checkLinstorAnswersThrow(answers); SnapshotRestore snapshotRestore = new SnapshotRestore(); snapshotRestore.toResource(rscName); logger.debug("Create new volume definition for snapshot: " + cloneRes + ":" + snapName); answers = linstorApi.resourceSnapshotsRestoreVolumeDefinition(cloneRes, snapName, snapshotRestore); - checkLinstorAnswersThrow(answers); + LinstorUtil.checkLinstorAnswersThrow(answers); // restore snapshot to new resource logger.info("Restore resource from snapshot: " + cloneRes + ":" + snapName); answers = linstorApi.resourceSnapshotRestore(cloneRes, snapName, snapshotRestore); - checkLinstorAnswersThrow(answers); + LinstorUtil.checkLinstorAnswersThrow(answers); LinstorUtil.applyAuxProps(linstorApi, rscName, volumeVO.getName(), null); - applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeVO.getMaxIops()); + LinstorUtil.applyQoSSettings(_storagePoolDao, storagePoolVO, linstorApi, rscName, volumeVO.getMaxIops()); return LinstorUtil.getDevicePath(linstorApi, rscName); } catch (ApiException apiEx) { @@ -790,7 +519,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver } else if (csTemplateId > 0) { return cloneResource(csTemplateId, volumeInfo, storagePoolVO); } else { - return createResource(volumeInfo, storagePoolVO); + return LinstorUtil.createResource(volumeInfo, storagePoolVO, _storagePoolDao); } } @@ -1140,7 +869,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver String rscName, String snapshotName, String restoredName) throws ApiException { - final String rscGrp = getRscGrp(storagePoolVO); + final String rscGrp = LinstorUtil.getRscGrp(storagePoolVO); // try to delete -rst resource, could happen if the copy failed and noone deleted it. 
deleteResourceDefinition(storagePoolVO, restoredName); ResourceDefinitionCreate rdc = createResourceDefinitionCreate(restoredName, rscGrp); @@ -1185,7 +914,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver final StoragePoolVO pool = _storagePoolDao.findById(dstData.getDataStore().getId()); final DevelopersApi api = LinstorUtil.getLinstorAPI(pool.getHostAddress()); final String rscName = LinstorUtil.RSC_PREFIX + dstData.getUuid(); - boolean newCreated = createResourceBase( + boolean newCreated = LinstorUtil.createResourceBase( LinstorUtil.RSC_PREFIX + dstData.getUuid(), tInfo.getSize(), tInfo.getName(), @@ -1193,9 +922,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver null, null, api, - getRscGrp(pool), + LinstorUtil.getRscGrp(pool), pool.getId(), - true); + true, + false); Answer answer; if (newCreated) { @@ -1429,7 +1159,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { resizeResource(api, rscName, resizeParameter.newSize); - applyQoSSettings(pool, api, rscName, resizeParameter.newMaxIops); + LinstorUtil.applyQoSSettings(_storagePoolDao, pool, api, rscName, resizeParameter.newMaxIops); { final VolumeVO volume = _volumeDao.findById(vol.getId()); volume.setMinIops(resizeParameter.newMinIops); @@ -1534,7 +1264,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver @Override public Pair getStorageStats(StoragePool storagePool) { logger.debug(String.format("Requesting storage stats: %s", storagePool)); - return LinstorUtil.getStorageStats(storagePool.getHostAddress(), getRscGrp(storagePool)); + return LinstorUtil.getStorageStats(storagePool.getHostAddress(), LinstorUtil.getRscGrp(storagePool)); } @Override diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java index 4196c12b116..7c45493dddc 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java @@ -22,6 +22,8 @@ import com.linbit.linstor.api.ApiException; import com.linbit.linstor.api.DevelopersApi; import com.linbit.linstor.api.model.ApiCallRc; import com.linbit.linstor.api.model.ApiCallRcList; +import com.linbit.linstor.api.model.AutoSelectFilter; +import com.linbit.linstor.api.model.LayerType; import com.linbit.linstor.api.model.Node; import com.linbit.linstor.api.model.Properties; import com.linbit.linstor.api.model.ProviderKind; @@ -29,24 +31,36 @@ import com.linbit.linstor.api.model.Resource; import com.linbit.linstor.api.model.ResourceDefinition; import com.linbit.linstor.api.model.ResourceDefinitionModify; import com.linbit.linstor.api.model.ResourceGroup; +import com.linbit.linstor.api.model.ResourceGroupSpawn; import com.linbit.linstor.api.model.ResourceWithVolumes; import com.linbit.linstor.api.model.StoragePool; import com.linbit.linstor.api.model.Volume; +import com.linbit.linstor.api.model.VolumeDefinition; +import com.linbit.linstor.api.model.VolumeDefinitionModify; import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import 
java.util.stream.Collectors; import com.cloud.hypervisor.kvm.storage.KVMStoragePool; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.logging.log4j.Logger; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.collections.CollectionUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.nio.charset.StandardCharsets; public class LinstorUtil { protected static Logger LOGGER = LogManager.getLogger(LinstorUtil.class); @@ -56,6 +70,8 @@ public class LinstorUtil { public static final String RSC_GROUP = "resourceGroup"; public static final String CS_TEMPLATE_FOR_PREFIX = "_cs-template-for-"; + public static final String LIN_PROP_DRBDOPT_EXACT_SIZE = "DrbdOptions/ExactSize"; + public static final String TEMP_VOLUME_ID = "tempVolumeId"; public static final String CLUSTER_DEFAULT_MIN_IOPS = "clusterDefaultMinIops"; @@ -76,6 +92,32 @@ public class LinstorUtil { .orElse((answers.get(0)).getMessage()) : null; } + public static void logLinstorAnswer(@Nonnull ApiCallRc answer) { + if (answer.isError()) { + LOGGER.error(answer.getMessage()); + } else if (answer.isWarning()) { + LOGGER.warn(answer.getMessage()); + } else if (answer.isInfo()) { + LOGGER.info(answer.getMessage()); + } + } + + public static void logLinstorAnswers(@Nonnull ApiCallRcList answers) { + answers.forEach(LinstorUtil::logLinstorAnswer); + } + + public static void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) { + logLinstorAnswers(answers); + if (answers.hasError()) + { + String errMsg = answers.stream() + .filter(ApiCallRc::isError) + .findFirst() + .map(ApiCallRc::getMessage).orElse("Unknown linstor error"); + throw new CloudRuntimeException(errMsg); + } + } + public static List getLinstorNodeNames(@Nonnull DevelopersApi api) throws ApiException { List nodes = api.nodeList( @@ -488,4 +530,253 @@ public class LinstorUtil { } return false; } + + public static String getRscGrp(com.cloud.storage.StoragePool storagePool) { + return storagePool.getUserInfo() != null && !storagePool.getUserInfo().isEmpty() ? + storagePool.getUserInfo() : "DfltRscGrp"; + } + + /** + * Condition if a template resource can be shared with the given resource group. + * @param tgtRscGrp + * @param tgtLayerStack + * @param rg + * @return True if the template resource can be shared, else false. + */ + private static boolean canShareTemplateForResourceGroup( + ResourceGroup tgtRscGrp, List tgtLayerStack, ResourceGroup rg) { + List rgLayerStack = rg.getSelectFilter() != null ? + rg.getSelectFilter().getLayerStack() : null; + return Objects.equals(tgtLayerStack, rgLayerStack) && + Objects.equals(tgtRscGrp.getSelectFilter().getStoragePoolList(), + rg.getSelectFilter().getStoragePoolList()); + } + + /** + * Searches for a shareable template for this rscGrpName and sets the aux template property. + * @param api + * @param rscName + * @param rscGrpName + * @param existingRDs + * @return + * @throws ApiException + */ + private static boolean foundShareableTemplate( + DevelopersApi api, String rscName, String rscGrpName, + List> existingRDs) throws ApiException { + if (!existingRDs.isEmpty()) { + ResourceGroup tgtRscGrp = api.resourceGroupList( + Collections.singletonList(rscGrpName), null, null, null).get(0); + List tgtLayerStack = tgtRscGrp.getSelectFilter() != null ? 
+ tgtRscGrp.getSelectFilter().getLayerStack() : null; + + // check if there is already a template copy, that we could reuse + // this means if select filters are similar enough to allow cloning from + for (Pair rdPair : existingRDs) { + ResourceGroup rg = rdPair.second(); + if (canShareTemplateForResourceGroup(tgtRscGrp, tgtLayerStack, rg)) { + LinstorUtil.setAuxTemplateForProperty(api, rscName, rscGrpName); + return true; + } + } + } + return false; + } + + /** + * Returns the layerlist of the resourceGroup with encryption(LUKS) added above STORAGE. + * If the resourceGroup layer list already contains LUKS this layer list will be returned. + * @param api Linstor developers API + * @param resourceGroup Resource group to get the encryption layer list + * @return layer list with LUKS added + */ + public static List getEncryptedLayerList(DevelopersApi api, String resourceGroup) { + try { + List rscGrps = api.resourceGroupList( + Collections.singletonList(resourceGroup), Collections.emptyList(), null, null); + + if (CollectionUtils.isEmpty(rscGrps)) { + throw new CloudRuntimeException( + String.format("Resource Group %s not found on Linstor cluster.", resourceGroup)); + } + + final ResourceGroup rscGrp = rscGrps.get(0); + List layers = Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE); + List curLayerStack = rscGrp.getSelectFilter() != null ? + rscGrp.getSelectFilter().getLayerStack() : Collections.emptyList(); + if (CollectionUtils.isNotEmpty(curLayerStack)) { + layers = curLayerStack.stream().map(LayerType::valueOf).collect(Collectors.toList()); + if (!layers.contains(LayerType.LUKS)) { + layers.add(layers.size() - 1, LayerType.LUKS); // lowest layer is STORAGE + } + } + return layers; + } catch (ApiException e) { + throw new CloudRuntimeException( + String.format("Resource Group %s not found on Linstor cluster.", resourceGroup)); + } + } + + /** + * Spawns a new Linstor resource with the given arguments. 
+ * @param api + * @param newRscName + * @param sizeInBytes + * @param isTemplate + * @param rscGrpName + * @param volName + * @param vmName + * @throws ApiException + */ + private static void spawnResource( + DevelopersApi api, String newRscName, long sizeInBytes, boolean isTemplate, String rscGrpName, + String volName, String vmName, @Nullable Long passPhraseId, @Nullable byte[] passPhrase, + boolean exactSize) throws ApiException + { + ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn(); + rscGrpSpawn.setResourceDefinitionName(newRscName); + rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024); + if (passPhraseId != null) { + AutoSelectFilter asf = new AutoSelectFilter(); + List luksLayers = getEncryptedLayerList(api, rscGrpName); + asf.setLayerStack(luksLayers.stream().map(LayerType::toString).collect(Collectors.toList())); + rscGrpSpawn.setSelectFilter(asf); + if (passPhrase != null) { + String utf8Passphrase = new String(passPhrase, StandardCharsets.UTF_8); + rscGrpSpawn.setVolumePassphrases(Collections.singletonList(utf8Passphrase)); + } + } + + Properties props = new Properties(); + if (isTemplate) { + props.put(LinstorUtil.getTemplateForAuxPropKey(rscGrpName), "true"); + } + if (exactSize) { + props.put(LIN_PROP_DRBDOPT_EXACT_SIZE, "true"); + } + rscGrpSpawn.setResourceDefinitionProps(props); + + LOGGER.info("Linstor: Spawn resource " + newRscName); + ApiCallRcList answers = api.resourceGroupSpawn(rscGrpName, rscGrpSpawn); + checkLinstorAnswersThrow(answers); + + answers = LinstorUtil.applyAuxProps(api, newRscName, volName, vmName); + checkLinstorAnswersThrow(answers); + } + + /** + * Creates a new Linstor resource. + * @param rscName + * @param sizeInBytes + * @param volName + * @param vmName + * @param api + * @param rscGrp + * @param poolId + * @param isTemplate indicates if the resource is a template + * @return true if a new resource was created, false if it already existed or was reused. + */ + public static boolean createResourceBase( + String rscName, long sizeInBytes, String volName, String vmName, + @Nullable Long passPhraseId, @Nullable byte[] passPhrase, DevelopersApi api, + String rscGrp, long poolId, boolean isTemplate, boolean exactSize) + { + try + { + LOGGER.debug("createRscBase: {} :: {} :: {} :: {}", rscName, rscGrp, isTemplate, exactSize); + List> existingRDs = LinstorUtil.getRDAndRGListStartingWith(api, rscName); + + String fullRscName = String.format("%s-%d", rscName, poolId); + boolean alreadyCreated = existingRDs.stream() + .anyMatch(p -> p.first().getName().equalsIgnoreCase(fullRscName)) || + existingRDs.stream().anyMatch(p -> p.first().getProps().containsKey(LinstorUtil.getTemplateForAuxPropKey(rscGrp))); + if (!alreadyCreated) { + boolean createNewRsc = !foundShareableTemplate(api, rscName, rscGrp, existingRDs); + if (createNewRsc) { + String newRscName = existingRDs.isEmpty() ? 
rscName : fullRscName; + spawnResource(api, newRscName, sizeInBytes, isTemplate, rscGrp, + volName, vmName, passPhraseId, passPhrase, exactSize); + } + return createNewRsc; + } + return false; + } catch (ApiException apiEx) + { + LOGGER.error("Linstor: ApiEx - {}", apiEx.getMessage()); + throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); + } + } + + public static void applyQoSSettings(PrimaryDataStoreDao primaryDataStoreDao, + StoragePoolVO storagePool, DevelopersApi api, String rscName, Long maxIops) + throws ApiException + { + Long currentQosIops = null; + List vlmDfns = api.volumeDefinitionList(rscName, null, null); + if (!vlmDfns.isEmpty()) + { + Properties props = vlmDfns.get(0).getProps(); + long iops = Long.parseLong(props.getOrDefault("sys/fs/blkio_throttle_write_iops", "0")); + currentQosIops = iops > 0 ? iops : null; + } + + if (!Objects.equals(maxIops, currentQosIops)) + { + VolumeDefinitionModify vdm = new VolumeDefinitionModify(); + if (maxIops != null) + { + Properties props = new Properties(); + props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops); + props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops); + vdm.overrideProps(props); + LOGGER.info("Apply qos setting: {} to {}", maxIops, rscName); + } + else + { + LOGGER.info("Remove QoS setting for {}", rscName); + vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops")); + } + ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm); + LinstorUtil.checkLinstorAnswersThrow(answers); + + Long capacityIops = storagePool.getCapacityIops(); + if (capacityIops != null) + { + long vcIops = currentQosIops != null ? currentQosIops * -1 : 0; + long vMaxIops = maxIops != null ? maxIops : 0; + long newIops = vcIops + vMaxIops; + capacityIops -= newIops; + LOGGER.info("Current storagepool {} iops capacity: {}", storagePool, capacityIops); + storagePool.setCapacityIops(Math.max(0, capacityIops)); + primaryDataStoreDao.update(storagePool.getId(), storagePool); + } + } + } + + public static String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO, + PrimaryDataStoreDao primaryDataStoreDao) { + return createResource(vol, storagePoolVO, primaryDataStoreDao, false); + } + + public static String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO, + PrimaryDataStoreDao primaryDataStoreDao, boolean exactSize) { + DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress()); + final String rscGrp = getRscGrp(storagePoolVO); + + final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid(); + createResourceBase( + rscName, vol.getSize(), vol.getName(), vol.getAttachedVmName(), vol.getPassphraseId(), vol.getPassphrase(), + linstorApi, rscGrp, storagePoolVO.getId(), false, exactSize); + + try + { + applyQoSSettings(primaryDataStoreDao, storagePoolVO, linstorApi, rscName, vol.getMaxIops()); + + return LinstorUtil.getDevicePath(linstorApi, rscName); + } catch (ApiException apiEx) + { + LOGGER.error("Linstor: ApiEx - " + apiEx.getMessage()); + throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); + } + } } diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/motion/LinstorDataMotionStrategy.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/motion/LinstorDataMotionStrategy.java new file mode 100644 index 00000000000..cab2820f09a --- /dev/null +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/motion/LinstorDataMotionStrategy.java 
@@ -0,0 +1,437 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.cloudstack.storage.motion; + +import com.linbit.linstor.api.ApiException; +import com.linbit.linstor.api.DevelopersApi; +import com.linbit.linstor.api.model.ApiCallRcList; +import com.linbit.linstor.api.model.ResourceDefinition; +import com.linbit.linstor.api.model.ResourceDefinitionModify; + +import javax.inject.Inject; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.MigrateAnswer; +import com.cloud.agent.api.MigrateCommand; +import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.LinstorUtil; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import 
org.apache.commons.lang3.ObjectUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.springframework.stereotype.Component; + + +/** + * Current state: + * just changing the resource-group on same storage pool resource-group is not really good enough. + * Linstor lacks currently of a good way to move resources to another resource-group and respecting + * every auto-filter setting. + * Also linstor clone would simply set the new resource-group without any adjustments of storage pools or + * auto-select resource placement. + * So currently, we will create a new resource in the wanted primary storage and let qemu copy the data into the + * devices. + */ + +@Component +public class LinstorDataMotionStrategy implements DataMotionStrategy { + protected Logger logger = LogManager.getLogger(getClass()); + + @Inject + private SnapshotDataStoreDao _snapshotStoreDao; + @Inject + private PrimaryDataStoreDao _storagePool; + @Inject + private VolumeDao _volumeDao; + @Inject + private VolumeDataFactory _volumeDataFactory; + @Inject + private VMInstanceDao _vmDao; + @Inject + private GuestOSDao _guestOsDao; + @Inject + private VolumeService _volumeService; + @Inject + private GuestOSCategoryDao _guestOsCategoryDao; + @Inject + private SnapshotDao _snapshotDao; + @Inject + private AgentManager _agentManager; + @Inject + private PrimaryDataStoreDao _storagePoolDao; + + @Override + public StrategyPriority canHandle(DataObject srcData, DataObject dstData) { + DataObjectType srcType = srcData.getType(); + DataObjectType dstType = dstData.getType(); + logger.debug("canHandle: {} -> {}", srcType, dstType); + return StrategyPriority.CANT_HANDLE; + } + + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, + AsyncCompletionCallback callback) { + throw new CloudRuntimeException("not implemented"); + } + + private boolean isDestinationLinstorPrimaryStorage(Map volumeMap) { + if (MapUtils.isNotEmpty(volumeMap)) { + for (DataStore dataStore : volumeMap.values()) { + StoragePoolVO storagePoolVO = _storagePool.findById(dataStore.getId()); + if (storagePoolVO == null + || !storagePoolVO.getStorageProviderName().equals(LinstorUtil.PROVIDER_NAME)) { + return false; + } + } + } else { + return false; + } + return true; + } + + @Override + public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { + logger.debug("canHandle -- {}: {} -> {}", volumeMap, srcHost, destHost); + if (srcHost.getId() != destHost.getId() && isDestinationLinstorPrimaryStorage(volumeMap)) { + return StrategyPriority.HIGHEST; + } + return StrategyPriority.CANT_HANDLE; + } + + private VolumeVO createNewVolumeVO(Volume volume, StoragePoolVO storagePoolVO) { + VolumeVO newVol = new VolumeVO(volume); + newVol.setInstanceId(null); + newVol.setChainInfo(null); + newVol.setPath(newVol.getUuid()); + newVol.setFolder(null); + newVol.setPodId(storagePoolVO.getPodId()); + newVol.setPoolId(storagePoolVO.getId()); + newVol.setLastPoolId(volume.getPoolId()); + + return _volumeDao.persist(newVol); + } + + private void removeExactSizeProperty(VolumeInfo volumeInfo) { + StoragePoolVO destStoragePool = _storagePool.findById(volumeInfo.getDataStore().getId()); + DevelopersApi api = LinstorUtil.getLinstorAPI(destStoragePool.getHostAddress()); + + ResourceDefinitionModify rdm = new ResourceDefinitionModify(); + rdm.setDeleteProps(Collections.singletonList(LinstorUtil.LIN_PROP_DRBDOPT_EXACT_SIZE)); + try { + String rscName = LinstorUtil.RSC_PREFIX + 
volumeInfo.getPath(); + ApiCallRcList answers = api.resourceDefinitionModify(rscName, rdm); + LinstorUtil.checkLinstorAnswersThrow(answers); + } catch (ApiException apiEx) { + logger.error("Linstor: ApiEx - {}", apiEx.getMessage()); + throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); + } + } + + private void handlePostMigration(boolean success, Map srcVolumeInfoToDestVolumeInfo, + VirtualMachineTO vmTO, Host destHost) { + if (!success) { + try { + PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO); + + pfmc.setRollback(true); + + Answer pfma = _agentManager.send(destHost.getId(), pfmc); + + if (pfma == null || !pfma.getResult()) { + String details = pfma != null ? pfma.getDetails() : "null answer returned"; + String msg = "Unable to rollback prepare for migration due to the following: " + details; + + throw new AgentUnavailableException(msg, destHost.getId()); + } + } catch (Exception e) { + logger.debug("Failed to disconnect one or more (original) dest volumes", e); + } + } + + for (Map.Entry entry : srcVolumeInfoToDestVolumeInfo.entrySet()) { + VolumeInfo srcVolumeInfo = entry.getKey(); + VolumeInfo destVolumeInfo = entry.getValue(); + + if (success) { + srcVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationSucceeded); + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationSucceeded); + + _volumeDao.updateUuid(srcVolumeInfo.getId(), destVolumeInfo.getId()); + + VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); + + volumeVO.setFormat(Storage.ImageFormat.QCOW2); + + _volumeDao.update(volumeVO.getId(), volumeVO); + + // remove exact size property + removeExactSizeProperty(destVolumeInfo); + + try { + _volumeService.destroyVolume(srcVolumeInfo.getId()); + + srcVolumeInfo = _volumeDataFactory.getVolume(srcVolumeInfo.getId()); + + AsyncCallFuture destroyFuture = + _volumeService.expungeVolumeAsync(srcVolumeInfo); + + if (destroyFuture.get().isFailed()) { + logger.debug("Failed to clean up source volume on storage"); + } + } catch (Exception e) { + logger.debug("Failed to clean up source volume on storage", e); + } + + // Update the volume ID for snapshots on secondary storage + if (!_snapshotDao.listByVolumeId(srcVolumeInfo.getId()).isEmpty()) { + _snapshotDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId()); + _snapshotStoreDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId()); + } + } else { + try { + _volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore()); + } catch (Exception e) { + logger.debug("Failed to revoke access from dest volume", e); + } + + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + srcVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + + try { + _volumeService.destroyVolume(destVolumeInfo.getId()); + + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId()); + + AsyncCallFuture destroyFuture = + _volumeService.expungeVolumeAsync(destVolumeInfo); + + if (destroyFuture.get().isFailed()) { + logger.debug("Failed to clean up dest volume on storage"); + } + } catch (Exception e) { + logger.debug("Failed to clean up dest volume on storage", e); + } + } + } + } + + /** + * Determines whether the destination volume should have the DRBD exact-size property set + * during migration. + * + *
This method queries the Linstor API to check if the source volume's resource definition + * has the exact-size DRBD option enabled. The exact-size property ensures that DRBD uses + * the precise volume size rather than rounding, which is important for maintaining size + * consistency during migrations.
+ * + * @param srcVolumeInfo the source volume information to check + * @return {@code true} if the exact-size property should be set on the destination volume, + * which occurs when the source volume has this property enabled, or when the + * property cannot be determined (defaults to {@code true} for safety); + * {@code false} only when the source is confirmed to not have the exact-size property + */ + private boolean needsExactSizeProp(VolumeInfo srcVolumeInfo) { + StoragePoolVO srcStoragePool = _storagePool.findById(srcVolumeInfo.getDataStore().getId()); + if (srcStoragePool.getPoolType() == Storage.StoragePoolType.Linstor) { + DevelopersApi api = LinstorUtil.getLinstorAPI(srcStoragePool.getHostAddress()); + + String rscName = LinstorUtil.RSC_PREFIX + srcVolumeInfo.getPath(); + try { + List rscDfns = api.resourceDefinitionList( + Collections.singletonList(rscName), + false, + Collections.emptyList(), + null, + null); + if (!CollectionUtils.isEmpty(rscDfns)) { + ResourceDefinition srcRsc = rscDfns.get(0); + String exactSizeProp = srcRsc.getProps().get(LinstorUtil.LIN_PROP_DRBDOPT_EXACT_SIZE); + return "true".equalsIgnoreCase(exactSizeProp); + } else { + logger.warn("Unknown resource {} on {}", rscName, srcStoragePool.getHostAddress()); + } + } catch (ApiException apiEx) { + logger.error("Unable to fetch resource definition {}: {}", rscName, apiEx.getBestMessage()); + } + } + return true; + } + + @Override + public void copyAsync(Map volumeDataStoreMap, VirtualMachineTO vmTO, Host srcHost, + Host destHost, AsyncCompletionCallback callback) { + + if (srcHost.getHypervisorType() != Hypervisor.HypervisorType.KVM) { + throw new CloudRuntimeException( + String.format("Invalid hypervisor type [%s]. Only KVM supported", srcHost.getHypervisorType())); + } + + String errMsg = null; + VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId()); + vmTO.setState(vmInstance.getState()); + List migrateDiskInfoList = new ArrayList<>(); + + Map migrateStorage = new HashMap<>(); + Map srcVolumeInfoToDestVolumeInfo = new HashMap<>(); + + try { + for (Map.Entry entry : volumeDataStoreMap.entrySet()) { + VolumeInfo srcVolumeInfo = entry.getKey(); + DataStore destDataStore = entry.getValue(); + VolumeVO srcVolume = _volumeDao.findById(srcVolumeInfo.getId()); + StoragePoolVO destStoragePool = _storagePool.findById(destDataStore.getId()); + + if (srcVolumeInfo.getPassphraseId() != null) { + throw new CloudRuntimeException( + String.format("Cannot live migrate encrypted volume: %s", srcVolumeInfo.getVolume())); + } + + VolumeVO destVolume = createNewVolumeVO(srcVolume, destStoragePool); + + VolumeInfo destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore); + + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationCopyRequested); + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationCopySucceeded); + destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); + + boolean exactSize = needsExactSizeProp(srcVolumeInfo); + + String devPath = LinstorUtil.createResource( + destVolumeInfo, destStoragePool, _storagePoolDao, exactSize); + + _volumeDao.update(destVolume.getId(), destVolume); + destVolume = _volumeDao.findById(destVolume.getId()); + + destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore); + + MigrateCommand.MigrateDiskInfo migrateDiskInfo = new MigrateCommand.MigrateDiskInfo( + srcVolumeInfo.getPath(), + MigrateCommand.MigrateDiskInfo.DiskType.BLOCK, + 
MigrateCommand.MigrateDiskInfo.DriverType.RAW, + MigrateCommand.MigrateDiskInfo.Source.DEV, + devPath); + migrateDiskInfoList.add(migrateDiskInfo); + + migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo); + + srcVolumeInfoToDestVolumeInfo.put(srcVolumeInfo, destVolumeInfo); + } + + PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO); + try { + Answer pfma = _agentManager.send(destHost.getId(), pfmc); + + if (pfma == null || !pfma.getResult()) { + String details = pfma != null ? pfma.getDetails() : "null answer returned"; + errMsg = String.format("Unable to prepare for migration due to the following: %s", details); + + throw new AgentUnavailableException(errMsg, destHost.getId()); + } + } catch (final OperationTimedoutException e) { + errMsg = String.format("Operation timed out due to %s", e.getMessage()); + throw new AgentUnavailableException(errMsg, destHost.getId()); + } + + VMInstanceVO vm = _vmDao.findById(vmTO.getId()); + boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()) + .getName().equalsIgnoreCase("Windows"); + + MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(), + destHost.getPrivateIpAddress(), isWindows, vmTO, true); + migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value()); + migrateCommand.setMigrateStorage(migrateStorage); + migrateCommand.setMigrateStorageManaged(true); + migrateCommand.setNewVmCpuShares( + vmTO.getCpus() * ObjectUtils.defaultIfNull(vmTO.getMinSpeed(), vmTO.getSpeed())); + migrateCommand.setMigrateDiskInfoList(migrateDiskInfoList); + + boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value(); + migrateCommand.setAutoConvergence(kvmAutoConvergence); + + MigrateAnswer migrateAnswer = (MigrateAnswer) _agentManager.send(srcHost.getId(), migrateCommand); + boolean success = migrateAnswer != null && migrateAnswer.getResult(); + + handlePostMigration(success, srcVolumeInfoToDestVolumeInfo, vmTO, destHost); + + if (migrateAnswer == null) { + throw new CloudRuntimeException("Unable to get an answer to the migrate command"); + } + + if (!migrateAnswer.getResult()) { + errMsg = migrateAnswer.getDetails(); + + throw new CloudRuntimeException(errMsg); + } + } catch (AgentUnavailableException | OperationTimedoutException | CloudRuntimeException ex) { + errMsg = String.format( + "Copy volume(s) of VM [%s] to storage(s) [%s] and VM to host [%s] failed in LinstorDataMotionStrategy.copyAsync. 
Error message: [%s].", + vmTO, srcHost, destHost, ex.getMessage()); + logger.error(errMsg, ex); + + throw new CloudRuntimeException(errMsg); + } finally { + CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg); + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(errMsg); + callback.complete(result); + } + } +} diff --git a/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml b/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml index a900323ede5..88d1051c71e 100644 --- a/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml +++ b/plugins/storage/volume/linstor/src/main/resources/META-INF/cloudstack/storage-volume-linstor/spring-storage-volume-linstor-context.xml @@ -33,4 +33,6 @@ class="org.apache.cloudstack.storage.snapshot.LinstorVMSnapshotStrategy" /> + diff --git a/plugins/storage/volume/linstor/src/test/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImplTest.java b/plugins/storage/volume/linstor/src/test/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImplTest.java index 75276739468..4653cfa358b 100644 --- a/plugins/storage/volume/linstor/src/test/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImplTest.java +++ b/plugins/storage/volume/linstor/src/test/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImplTest.java @@ -26,6 +26,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import org.apache.cloudstack.storage.datastore.util.LinstorUtil; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -75,13 +76,13 @@ public class LinstorPrimaryDataStoreDriverImplTest { when(api.resourceGroupList(Collections.singletonList("EncryptedGrp"), Collections.emptyList(), null, null)) .thenReturn(Collections.singletonList(encryptedGrp)); - List layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "DfltRscGrp"); + List layers = LinstorUtil.getEncryptedLayerList(api, "DfltRscGrp"); Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE), layers); - layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "BcacheGrp"); + layers = LinstorUtil.getEncryptedLayerList(api, "BcacheGrp"); Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.BCACHE, LayerType.LUKS, LayerType.STORAGE), layers); - layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "EncryptedGrp"); + layers = LinstorUtil.getEncryptedLayerList(api, "EncryptedGrp"); Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE), layers); } } From 3ba5c2b610f1a13cae612462720ffaf624b76501 Mon Sep 17 00:00:00 2001 From: Andrija Panic <45762285+andrijapanicsb@users.noreply.github.com> Date: Thu, 29 Jan 2026 11:27:46 +0100 Subject: [PATCH 101/126] Fix logs mismatch between Network GC wait and interval (#10776) Co-authored-by: nvazquez Co-authored-by: dahn Co-authored-by: Wei Zhou --- .../engine/orchestration/NetworkOrchestrator.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java 
b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index d19a4f1d4a0..899ce51022b 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -3559,8 +3559,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra final HashMap stillFree = new HashMap(); final List networkIds = _networksDao.findNetworksToGarbageCollect(); - final int netGcWait = NumbersUtil.parseInt(_configDao.getValue(NetworkGcWait.key()), 60); - logger.info("NetworkGarbageCollector uses '{}' seconds for GC interval.", netGcWait); + final int netGcWait = NetworkGcWait.value(); + final int netGcInterval = NetworkGcInterval.value(); + logger.info("NetworkGarbageCollector uses '{}' seconds for GC wait and '{}' seconds for GC interval.", netGcWait, netGcInterval); for (final Long networkId : networkIds) { if (!_networkModel.isNetworkReadyForGc(networkId)) { @@ -4882,9 +4883,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } public static final ConfigKey NetworkGcWait = new ConfigKey(Integer.class, "network.gc.wait", "Advanced", "600", - "Time (in seconds) to wait before shutting down a network that's not in used", false, Scope.Global, null); + "Time (in seconds) to wait before shutting down a network that's not in used", true, Scope.Global, null); public static final ConfigKey NetworkGcInterval = new ConfigKey(Integer.class, "network.gc.interval", "Advanced", "600", - "Seconds to wait before checking for networks to shutdown", true, Scope.Global, null); + "Seconds to wait before checking for networks to shutdown", false, Scope.Global, null); @Override public ConfigKey[] getConfigKeys() { From 0b62fb5e20c27116f11bd8af72255b45d5703c02 Mon Sep 17 00:00:00 2001 From: Manoj Kumar Date: Thu, 29 Jan 2026 16:06:19 +0530 Subject: [PATCH 102/126] Add cloud image downloader script (#11918) --- tools/utils/cloud-image-downloader.sh | 259 ++++++++++++++++++++++++++ 1 file changed, 259 insertions(+) create mode 100755 tools/utils/cloud-image-downloader.sh diff --git a/tools/utils/cloud-image-downloader.sh b/tools/utils/cloud-image-downloader.sh new file mode 100755 index 00000000000..90f234906e1 --- /dev/null +++ b/tools/utils/cloud-image-downloader.sh @@ -0,0 +1,259 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +#------------------------------------------------------------------------------- +# Configuration +#------------------------------------------------------------------------------- +# This section contains the variables you might want to change. 
+ +# The temporary directory where files will be downloaded. +# It's a good practice to create a unique temporary directory for each script run. +TEMP_DIR=$(mktemp -d) + +# The BASE destination directory for the downloaded image files. +# Subdirectories for each distro will be created inside this one. +# Make sure this directory exists before running the script. +# Must be executed by the cloudstack user on machine hosting the public download site. +# It will be publicly available at https://download.cloudstack.org/templates/cloud-images/ +DEST_DIR="${HOME}/repository/templates/cloud-images" + +# The directory where log files will be stored. +# Make sure this directory exists. +LOG_DIR="${HOME}/log/cloud-image-downloader" +LOG_FILE="${LOG_DIR}/cloud-image-downloader_$(date +%Y%m%d_%H%M%S).log" +LOG_RETENTION_DAYS=30 + +LOGGER_TAG="cloud-image-downloader" +LOGGER_FACILITY="user" +LOGGER_AVAILABLE=false + +log_message() { + local priority=$1 + shift + local message="$*" + local timestamp=$(date +'%Y-%m-%d %H:%M:%S') + + # Log to file + echo "${timestamp} [${priority}] ${message}" | tee -a "${LOG_FILE}" + + # Log to syslog using logger utility + if [ "${LOGGER_AVAILABLE}" = true ]; then + logger -t "${LOGGER_TAG}" -p "${LOGGER_FACILITY}.${priority}" -- "${message}" + fi +} + +log_info() { + log_message "info" "$@" +} + +log_warn() { + log_message "warning" "$@" +} + +log_error() { + log_message "err" "$@" +} + +cleanup_old_logs() { + log_info "Cleaning up log files older than ${LOG_RETENTION_DAYS} days..." + + if [ ! -d "$LOG_DIR" ]; then + log_warn "Log directory does not exist: $LOG_DIR" + return + fi + + local deleted_count=0 + + # Find and delete log files older than retention period + while IFS= read -r -d '' log_file; do + rm -f "$log_file" + deleted_count=$((deleted_count + 1)) + done < <(find "$LOG_DIR" -name "*.log" -type f -mtime +${LOG_RETENTION_DAYS} -print0 2>/dev/null) + + if [ $deleted_count -gt 0 ]; then + log_info "Deleted $deleted_count old log file(s)" + else + log_info "No old log files to delete" + fi +} + +#------------------------------------------------------------------------------- +# Image Definitions +#------------------------------------------------------------------------------- +# To add a new image, you must add an entry to BOTH arrays below. + +# 1. Add the destination filename and the download URL. 
+declare -A IMAGE_URLS=( + ["Rocky-9-GenericCloud.latest.x86_64.qcow2"]="https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2" + ["Rocky-9-GenericCloud.latest.aarch64.qcow2"]="https://dl.rockylinux.org/pub/rocky/9/images/aarch64/Rocky-9-GenericCloud.latest.aarch64.qcow2" + ["Rocky-8-GenericCloud.latest.x86_64.qcow2"]="https://dl.rockylinux.org/pub/rocky/8/images/x86_64/Rocky-8-GenericCloud.latest.x86_64.qcow2" + ["Rocky-8-GenericCloud.latest.aarch64.qcow2"]="https://dl.rockylinux.org/pub/rocky/8/images/aarch64/Rocky-8-GenericCloud.latest.aarch64.qcow2" + ["openSUSE-Leap-15.5-Minimal-VM.x86_64-Cloud.qcow2"]="https://download.opensuse.org/distribution/leap/15.5/appliances/openSUSE-Leap-15.5-Minimal-VM.x86_64-Cloud.qcow2" + ["openSUSE-Leap-15.5-Minimal-VM.aarch64-Cloud.qcow2"]="https://download.opensuse.org/distribution/leap/15.5/appliances/openSUSE-Leap-15.5-Minimal-VM.aarch64-Cloud.qcow2" + ["debian-12-genericcloud-amd64.qcow2"]="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2" + ["debian-12-genericcloud-arm64.qcow2"]="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-arm64.qcow2" + ["ubuntu-24.04-server-cloudimg-amd64.img"]="https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img" + ["ubuntu-24.04-server-cloudimg-arm64.img"]="https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-arm64.img" + ["ubuntu-22.04-server-cloudimg-amd64.img"]="https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.img" + ["ubuntu-22.04-server-cloudimg-arm64.img"]="https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-arm64.img" + ["ubuntu-20.04-server-cloudimg-amd64.img"]="https://cloud-images.ubuntu.com/releases/20.04/release/ubuntu-20.04-server-cloudimg-amd64.img" + ["ubuntu-20.04-server-cloudimg-arm64.img"]="https://cloud-images.ubuntu.com/releases/20.04/release/ubuntu-20.04-server-cloudimg-arm64.img" + ["OL9U5_x86_64-kvm-b259.qcow2"]="https://yum.oracle.com/templates/OracleLinux/OL9/u5/x86_64/OL9U5_x86_64-kvm-b259.qcow2" + ["OL9U5_aarch64-kvm-b126.qcow2"]="https://yum.oracle.com/templates/OracleLinux/OL9/u5/aarch64/OL9U5_aarch64-kvm-b126.qcow2" + ["OL8U10_x86_64-kvm-b258.qcow2"]="https://yum.oracle.com/templates/OracleLinux/OL8/u10/x86_64/OL8U10_x86_64-kvm-b258.qcow2" + ["OL8U10_aarch64-kvm-b122.qcow2"]="https://yum.oracle.com/templates/OracleLinux/OL8/u10/aarch64/OL8U10_aarch64-kvm-b122.qcow2" +) + +# 2. Add the destination filename and its corresponding distribution subdirectory name. 
+declare -A IMAGE_DISTROS=( + ["Rocky-9-GenericCloud.latest.x86_64.qcow2"]="rockylinux" + ["Rocky-9-GenericCloud.latest.aarch64.qcow2"]="rockylinux" + ["Rocky-8-GenericCloud.latest.x86_64.qcow2"]="rockylinux" + ["Rocky-8-GenericCloud.latest.aarch64.qcow2"]="rockylinux" + ["openSUSE-Leap-15.5-Minimal-VM.x86_64-Cloud.qcow2"]="opensuse" + ["openSUSE-Leap-15.5-Minimal-VM.aarch64-Cloud.qcow2"]="opensuse" + ["debian-12-genericcloud-amd64.qcow2"]="debian" + ["debian-12-genericcloud-arm64.qcow2"]="debian" + ["ubuntu-24.04-server-cloudimg-amd64.img"]="ubuntu" + ["ubuntu-24.04-server-cloudimg-arm64.img"]="ubuntu" + ["ubuntu-22.04-server-cloudimg-amd64.img"]="ubuntu" + ["ubuntu-22.04-server-cloudimg-arm64.img"]="ubuntu" + ["ubuntu-20.04-server-cloudimg-amd64.img"]="ubuntu" + ["ubuntu-20.04-server-cloudimg-arm64.img"]="ubuntu" + ["OL9U5_x86_64-kvm-b259.qcow2"]="oraclelinux" + ["OL9U5_aarch64-kvm-b126.qcow2"]="oraclelinux" + ["OL8U10_x86_64-kvm-b258.qcow2"]="oraclelinux" + ["OL8U10_aarch64-kvm-b122.qcow2"]="oraclelinux" +) + +#------------------------------------------------------------------------------- +# Cleanup Handler +#------------------------------------------------------------------------------- + +cleanup_on_exit() { + local exit_code=$? + if [ -d "$TEMP_DIR" ]; then + rm -rf "$TEMP_DIR" + log_info "Temporary directory $TEMP_DIR removed." + fi + + if [ $exit_code -ne 0 ]; then + log_error "Script exited with error code: $exit_code" + fi +} + +trap cleanup_on_exit EXIT INT TERM + +#------------------------------------------------------------------------------- +# Main Script Logic +#------------------------------------------------------------------------------- + +if command -v logger &> /dev/null; then + LOGGER_AVAILABLE=true +fi + +# Ensure base destination and log directories exist +mkdir -p "$DEST_DIR" +mkdir -p "$LOG_DIR" + +# Clean up old logs first +cleanup_old_logs + +log_info "Starting image download process." +log_info "Temporary directory: $TEMP_DIR" +log_info "Base destination directory: $DEST_DIR" +log_info "Log file: $LOG_FILE" + +# Inform about logger status +if [ "${LOGGER_AVAILABLE}" = true ]; then + log_info "Syslog logging enabled (tag: ${LOGGER_TAG})" +else + log_warn "Syslog logging disabled - logger utility not found" +fi + +# Loop through the image URLs +for filename in "${!IMAGE_URLS[@]}"; do + url="${IMAGE_URLS[$filename]}" + distro="${IMAGE_DISTROS[$filename]}" + + # Check if a distro is defined for the file + if [ -z "$distro" ]; then + log_error "No distribution directory defined for $filename. Skipping." + continue + fi + + distro_dest_dir="${DEST_DIR}/${distro}" + temp_filepath="${TEMP_DIR}/${filename}" + dest_filepath="${distro_dest_dir}/${filename}" + + log_info "--------------------------------------------------" + log_info "Starting download for: $filename" + log_info "URL: $url" + + # Download the file to the temporary directory + wget --progress=bar:force:noscroll -O "$temp_filepath" "$url" + download_status=$? + + if [ $download_status -ne 0 ]; then + # Handle download failure + log_error "Failed to download $filename from $url. wget exit code: $download_status" + else + # Handle download success + log_info "Successfully downloaded $filename to temporary location." 
+ + # Ensure the specific distro directory exists + log_info "Ensuring destination directory exists: $distro_dest_dir" + mkdir -p "$distro_dest_dir" + + # Move the file to the destination directory, replacing any existing file + log_info "Moving $filename to $dest_filepath" + mv -f "$temp_filepath" "$dest_filepath" + move_status=$? + + if [ $move_status -ne 0 ]; then + log_error "Failed to move $filename to $dest_filepath. mv exit code: $move_status" + else + log_info "Successfully moved $filename." + fi + fi +done + +log_info "Generate checksum" +# Create md5 checksum +checksum_file="md5sum.txt" +sha512_checksum_file="sha512sum.txt" + +cd "$DEST_DIR" +find . -type f ! -iname '*.txt' -exec md5sum {} \; > "$checksum_file" +checksum_status=$? +if [ $checksum_status -ne 0 ]; then + log_error "Failed to create md5 checksum. md5sum exit code: $checksum_status" +else + log_info "Successfully created checksum file: $checksum_file" +fi + +find . -type f ! -iname '*.txt' -exec sha512sum {} \; > "$sha512_checksum_file" +sha512_checksum_status=$? +if [ $sha512_checksum_status -ne 0 ]; then + log_error "Failed to create sha512 checksum. sha512sum exit code: $sha512_checksum_status" +else + log_info "Successfully created checksum file: $sha512_checksum_file" +fi + +log_info "--------------------------------------------------" +log_info "Image download process finished." From 3626c515e8e801e7d4f9179508af7bbd2985098c Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 29 Jan 2026 17:06:16 +0530 Subject: [PATCH 103/126] server: fix listing resource limits (#12188) --- .../ResourceLimitManagerImpl.java | 76 ++++++++++--------- .../ResourceLimitManagerImplTest.java | 65 ++++++++++++++-- 2 files changed, 96 insertions(+), 45 deletions(-) diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index 3ee3da504a3..f4548ed5e8f 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -21,6 +21,7 @@ import static com.cloud.utils.NumbersUtil.toHumanReadableSize; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; +import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -36,9 +37,6 @@ import java.util.stream.Stream; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.event.ActionEventUtils; -import com.cloud.event.EventTypes; -import com.cloud.utils.Ternary; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.response.AccountResponse; @@ -86,6 +84,8 @@ import com.cloud.dc.dao.VlanDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; @@ -118,6 +118,7 @@ import com.cloud.user.AccountVO; import com.cloud.user.ResourceLimitService; import com.cloud.user.dao.AccountDao; import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; @@ -804,45 +805,46 @@ public 
class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim limits.addAll(foundLimits); } } else { - limits.addAll(foundLimits); - - // see if any limits are missing from the table, and if yes - get it from the config table and add - ResourceType[] resourceTypes = ResourceCount.ResourceType.values(); - if (foundLimits.size() != resourceTypes.length) { - List accountLimitStr = new ArrayList<>(); - List domainLimitStr = new ArrayList<>(); - for (ResourceLimitVO foundLimit : foundLimits) { - if (foundLimit.getAccountId() != null) { - accountLimitStr.add(foundLimit.getType().toString()); - } else { - domainLimitStr.add(foundLimit.getType().toString()); - } - } - - // get default from config values - if (isAccount) { - if (accountLimitStr.size() < resourceTypes.length) { - for (ResourceType rt : resourceTypes) { - if (!accountLimitStr.contains(rt.toString())) { - limits.add(new ResourceLimitVO(rt, findCorrectResourceLimitForAccount(_accountMgr.getAccount(accountId), rt, null), accountId, ResourceOwnerType.Account)); - } - } - } - } else { - if (domainLimitStr.size() < resourceTypes.length) { - for (ResourceType rt : resourceTypes) { - if (!domainLimitStr.contains(rt.toString())) { - limits.add(new ResourceLimitVO(rt, findCorrectResourceLimitForDomain(_domainDao.findById(domainId), rt, null), domainId, ResourceOwnerType.Domain)); - } - } - } - } - } + limits = getConsolidatedResourceLimitsForAllResourceTypes(accountId, domainId, foundLimits, isAccount); } addTaggedResourceLimits(limits, resourceType, isAccount ? ResourceOwnerType.Account : ResourceOwnerType.Domain, isAccount ? accountId : domainId, hostTags, storageTags); return limits; } + protected List getConsolidatedResourceLimitsForAllResourceTypes(Long accountId, Long domainId, + List foundLimits, boolean isAccount) { + List limits = new ArrayList<>(foundLimits); + + Set allResourceTypes = EnumSet.allOf(ResourceType.class); + Set foundUntaggedTypes = foundLimits.stream() + .filter(l -> StringUtils.isEmpty(l.getTag())) + .map(ResourceLimitVO::getType) + .collect(Collectors.toSet()); + + if (foundUntaggedTypes.containsAll(allResourceTypes)) { + return limits; + } + + ResourceOwnerType ownerType = isAccount ? ResourceOwnerType.Account : ResourceOwnerType.Domain; + long ownerId = isAccount ? accountId : domainId; + + for (ResourceType rt : allResourceTypes) { + if (foundUntaggedTypes.contains(rt)) { + continue; + } + long max; + if (isAccount) { + Account acct = _accountMgr.getAccount(accountId); + max = findCorrectResourceLimitForAccount(acct, rt, null); + } else { + DomainVO dom = _domainDao.findById(domainId); + max = findCorrectResourceLimitForDomain(dom, rt, null); + } + limits.add(new ResourceLimitVO(rt, max, ownerId, ownerType)); + } + return limits; + } + protected void addTaggedResourceLimits(List limits, List types, List tags, ResourceOwnerType ownerType, long ownerId) { if (CollectionUtils.isEmpty(tags)) { return; diff --git a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java index e04ccc0ca13..42022ced75e 100644 --- a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java +++ b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java @@ -16,17 +16,17 @@ // under the License. 
package com.cloud.resourcelimit; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; -import com.cloud.event.ActionEventUtils; -import com.cloud.event.EventTypes; -import com.cloud.utils.db.EntityManager; - import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.DomainResponse; @@ -62,6 +62,8 @@ import com.cloud.configuration.dao.ResourceLimitDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; import com.cloud.exception.ResourceAllocationException; import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; @@ -75,21 +77,19 @@ import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; -import com.cloud.user.User; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; import com.cloud.user.ResourceLimitService; +import com.cloud.user.User; import com.cloud.user.dao.AccountDao; import com.cloud.utils.Pair; +import com.cloud.utils.db.EntityManager; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vpc.MockResourceLimitManagerImpl; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - @RunWith(MockitoJUnitRunner.class) public class ResourceLimitManagerImplTest { private Logger logger = LogManager.getLogger(ResourceLimitManagerImplTest.class); @@ -1397,4 +1397,53 @@ public class ResourceLimitManagerImplTest { domainId, ApiCommandResourceType.Domain.toString())); } } + + @Test + public void consolidatedResourceLimitsForAllResourceTypesWithAccountId() { + Long accountId = 1L; + Long domainId = null; + List foundLimits = new ArrayList<>(); + ResourceLimitVO limit = new ResourceLimitVO(Resource.ResourceType.cpu, 10L, accountId, Resource.ResourceOwnerType.Account); + foundLimits.add(limit); + + Mockito.when(accountManager.getAccount(accountId)).thenReturn(Mockito.mock(Account.class)); + Mockito.doReturn(20L).when(resourceLimitManager).findCorrectResourceLimitForAccount(Mockito.any(Account.class), Mockito.any(Resource.ResourceType.class), Mockito.isNull()); + + List result = resourceLimitManager.getConsolidatedResourceLimitsForAllResourceTypes(accountId, domainId, foundLimits, true); + + Assert.assertEquals(EnumSet.allOf(Resource.ResourceType.class).size(), result.size()); + Assert.assertTrue(result.contains(limit)); + } + + @Test + public void consolidatedResourceLimitsForAllResourceTypesWithDomainId() { + Long accountId = null; + Long domainId = 1L; + List foundLimits = new ArrayList<>(); + ResourceLimitVO limit = new ResourceLimitVO(Resource.ResourceType.memory, 15L, domainId, Resource.ResourceOwnerType.Domain); + foundLimits.add(limit); + + Mockito.when(domainDao.findById(domainId)).thenReturn(Mockito.mock(DomainVO.class)); + Mockito.doReturn(30L).when(resourceLimitManager).findCorrectResourceLimitForDomain(Mockito.any(Domain.class), Mockito.any(Resource.ResourceType.class), Mockito.isNull()); + + List result = 
resourceLimitManager.getConsolidatedResourceLimitsForAllResourceTypes(accountId, domainId, foundLimits, false); + + Assert.assertEquals(EnumSet.allOf(Resource.ResourceType.class).size(), result.size()); + Assert.assertTrue(result.contains(limit)); + } + + @Test + public void consolidatedResourceLimitsForAllResourceTypesWithEmptyFoundLimits() { + Long accountId = 1L; + Long domainId = null; + List foundLimits = new ArrayList<>(); + + Mockito.when(accountManager.getAccount(accountId)).thenReturn(Mockito.mock(Account.class)); + Mockito.doReturn(25L).when(resourceLimitManager).findCorrectResourceLimitForAccount(Mockito.any(Account.class), Mockito.any(Resource.ResourceType.class), Mockito.isNull()); + + List result = resourceLimitManager.getConsolidatedResourceLimitsForAllResourceTypes(accountId, domainId, foundLimits, true); + + Assert.assertEquals(EnumSet.allOf(Resource.ResourceType.class).size(), result.size()); + Assert.assertEquals(25L, result.get(0).getMax().longValue()); + } } From aef3df75771ecb2eaf237b3b803b87cf332a8554 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Thu, 29 Jan 2026 13:47:08 +0100 Subject: [PATCH 104/126] server: pass network label of physical network when plug nic for private gateway on hypervisor (#11846) --- .../network/dao/PhysicalNetworkTrafficTypeDaoImpl.java | 4 +++- .../com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java | 2 +- .../src/main/java/com/cloud/network/NetworkModelImpl.java | 6 +++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java index 4811b59d31e..6504bb1f3c8 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java @@ -137,7 +137,9 @@ public class PhysicalNetworkTrafficTypeDaoImpl extends GenericDaoBase tag = customSearch(sc, null); return tag.size() == 0 ? 
null : tag.get(0); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java index 3b66529ccaf..b4f7fbd6dac 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java @@ -232,7 +232,7 @@ public class BridgeVifDriver extends VifDriverBase { String brName = createVnetBr(vNetId, trafficLabel, protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps); } else { - String brName = createVnetBr(vNetId, _bridges.get("private"), protocol); + String brName = createVnetBr(vNetId, _bridges.get("guest"), protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps); } } else { diff --git a/server/src/main/java/com/cloud/network/NetworkModelImpl.java b/server/src/main/java/com/cloud/network/NetworkModelImpl.java index 4a5b7199430..86791b87851 100644 --- a/server/src/main/java/com/cloud/network/NetworkModelImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkModelImpl.java @@ -1442,11 +1442,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi return null; } + NetworkOffering offering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); Long physicalNetworkId = null; if (effectiveTrafficType != TrafficType.Guest) { physicalNetworkId = getNonGuestNetworkPhysicalNetworkId(network, effectiveTrafficType); } else { - NetworkOffering offering = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); physicalNetworkId = network.getPhysicalNetworkId(); if (physicalNetworkId == null) { physicalNetworkId = findPhysicalNetworkId(network.getDataCenterId(), offering.getTags(), offering.getTrafficType()); @@ -1459,6 +1459,10 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi return null; } + if (offering != null && TrafficType.Guest.equals(offering.getTrafficType()) && offering.isSystemOnly()) { + // For private gateway, do not check the Guest traffic type + return _pNTrafficTypeDao.getNetworkTag(physicalNetworkId, null, hType); + } return _pNTrafficTypeDao.getNetworkTag(physicalNetworkId, effectiveTrafficType, hType); } From 8c2a0308165173812d146d835bf31c3e515daff4 Mon Sep 17 00:00:00 2001 From: Daniel Augusto Veronezi Salvador <38945620+GutoVeronezi@users.noreply.github.com> Date: Thu, 29 Jan 2026 09:51:22 -0300 Subject: [PATCH 105/126] Fix query filter and units tests (#12184) --- .../com/cloud/network/dao/NetworkDaoImpl.java | 2 +- .../cloud/network/dao/NetworkDaoImplTest.java | 31 +++++++------------ 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java index 8066b89b4b9..4e8b6204f72 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java @@ -598,7 +598,7 @@ public class NetworkDaoImpl extends GenericDaoBaseimplements Ne public List listByPhysicalNetworkTrafficType(final long physicalNetworkId, final TrafficType trafficType) { final SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("trafficType", trafficType); - 
sc.setParameters("physicalNetwork", physicalNetworkId); + sc.setParameters("physicalNetworkId", physicalNetworkId); return listBy(sc); } diff --git a/engine/schema/src/test/java/com/cloud/network/dao/NetworkDaoImplTest.java index ab5f4352105..a78eab568af 100644 --- a/engine/schema/src/test/java/com/cloud/network/dao/NetworkDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/network/dao/NetworkDaoImplTest.java @@ -22,7 +22,6 @@ package com.cloud.network.dao; import com.cloud.network.Networks; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.TransactionLegacy; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -30,7 +29,6 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; - import java.util.List; @RunWith(MockitoJUnitRunner.class) @@ -46,26 +44,21 @@ public class NetworkDaoImplTest { List listNetworkVoMock; @Test - public void listByPhysicalNetworkTrafficTypeTestSetParametersValidation() throws Exception { + public void listByPhysicalNetworkTrafficTypeTestSetParametersValidation() { NetworkDaoImpl networkDaoImplSpy = Mockito.spy(NetworkDaoImpl.class); - TransactionLegacy txn = TransactionLegacy.open("runNetworkDaoImplTest"); - try { - networkDaoImplSpy.AllFieldsSearch = searchBuilderNetworkVoMock; - Mockito.doReturn(searchCriteriaNetworkVoMock).when(searchBuilderNetworkVoMock).create(); - Mockito.doNothing().when(searchCriteriaNetworkVoMock).setParameters(Mockito.anyString(), Mockito.any()); - Mockito.doReturn(listNetworkVoMock).when(networkDaoImplSpy).listBy(Mockito.any(SearchCriteria.class)); + networkDaoImplSpy.AllFieldsSearch = searchBuilderNetworkVoMock; + Mockito.doReturn(searchCriteriaNetworkVoMock).when(searchBuilderNetworkVoMock).create(); + Mockito.doNothing().when(searchCriteriaNetworkVoMock).setParameters(Mockito.anyString(), Mockito.any()); + Mockito.doReturn(listNetworkVoMock).when(networkDaoImplSpy).listBy(Mockito.any(SearchCriteria.class)); - long expectedPhysicalNetwork = 2513l; + long expectedPhysicalNetwork = 2513l; - for (Networks.TrafficType trafficType : Networks.TrafficType.values()) { - List result = networkDaoImplSpy.listByPhysicalNetworkTrafficType(expectedPhysicalNetwork, trafficType); - Assert.assertEquals(listNetworkVoMock, result); - Mockito.verify(searchCriteriaNetworkVoMock).setParameters("trafficType", trafficType); - } - - Mockito.verify(searchCriteriaNetworkVoMock, Mockito.times(Networks.TrafficType.values().length)).setParameters("physicalNetwork", expectedPhysicalNetwork); - } finally { - txn.close(); + for (Networks.TrafficType trafficType : Networks.TrafficType.values()) { + List result = networkDaoImplSpy.listByPhysicalNetworkTrafficType(expectedPhysicalNetwork, trafficType); + Assert.assertEquals(listNetworkVoMock, result); + Mockito.verify(searchCriteriaNetworkVoMock).setParameters("trafficType", trafficType); } + + Mockito.verify(searchCriteriaNetworkVoMock, Mockito.times(Networks.TrafficType.values().length)).setParameters("physicalNetworkId", expectedPhysicalNetwork); } } From 7d52cd0e43a5e225f10bce251f7cd357060382cd Mon Sep 17 00:00:00 2001 From: Fabricio Duarte Date: Thu, 29 Jan 2026 10:38:12 -0300 Subject: [PATCH 106/126] Fix calculation of the next time that Usage will execute in `removeRawUsageRecords` (#12518) * Fix calculation of the next time that Usage will execute in `removeRawUsageRecords` * Address copilot
reviews --- .../com/cloud/usage/UsageServiceImpl.java | 62 ++++---- .../com/cloud/usage/UsageManagerImpl.java | 37 ++--- .../cloudstack/utils/usage/UsageUtils.java | 51 +++++++ .../utils/usage/UsageUtilsTest.java | 135 ++++++++++++++++++ 4 files changed, 232 insertions(+), 53 deletions(-) create mode 100644 utils/src/test/java/org/apache/cloudstack/utils/usage/UsageUtilsTest.java diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index edaa22c3bcf..de8d4633d22 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -17,7 +17,6 @@ package com.cloud.usage; import java.util.ArrayList; -import java.util.Calendar; import java.util.Date; import java.util.List; import java.util.Map; @@ -35,6 +34,7 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.usage.Usage; import org.apache.cloudstack.usage.UsageService; import org.apache.cloudstack.usage.UsageTypes; +import org.apache.cloudstack.utils.usage.UsageUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.jetbrains.annotations.NotNull; @@ -127,14 +127,25 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag @Inject private NetworkOfferingDao _networkOfferingDao; + private TimeZone usageExecutionTimeZone = TimeZone.getTimeZone("GMT"); + + private static final long REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS = 15 * 60 * 1000; + public UsageServiceImpl() { } @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); + String timeZoneStr = ObjectUtils.defaultIfNull(_configDao.getValue(Config.UsageAggregationTimezone.toString()), "GMT"); _usageTimezone = TimeZone.getTimeZone(timeZoneStr); + + String executionTimeZone = _configDao.getValue(Config.UsageExecutionTimezone.toString()); + if (executionTimeZone != null) { + usageExecutionTimeZone = TimeZone.getTimeZone(executionTimeZone); + } + return true; } @@ -465,35 +476,28 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag @Override public boolean removeRawUsageRecords(RemoveRawUsageRecordsCmd cmd) throws InvalidParameterValueException { Integer interval = cmd.getInterval(); - if (interval != null && interval > 0 ) { - String jobExecTime = _configDao.getValue(Config.UsageStatsJobExecTime.toString()); - if (jobExecTime != null ) { - String[] segments = jobExecTime.split(":"); - if (segments.length == 2) { - String timeZoneStr = _configDao.getValue(Config.UsageExecutionTimezone.toString()); - if (timeZoneStr == null) { - timeZoneStr = "GMT"; - } - TimeZone tz = TimeZone.getTimeZone(timeZoneStr); - Calendar cal = Calendar.getInstance(tz); - cal.setTime(new Date()); - long curTS = cal.getTimeInMillis(); - cal.set(Calendar.HOUR_OF_DAY, Integer.parseInt(segments[0])); - cal.set(Calendar.MINUTE, Integer.parseInt(segments[1])); - cal.set(Calendar.SECOND, 0); - cal.set(Calendar.MILLISECOND, 0); - long execTS = cal.getTimeInMillis(); - logger.debug("Trying to remove old raw cloud_usage records older than " + interval + " day(s), current time=" + curTS + " next job execution time=" + execTS); - // Let's avoid cleanup when job runs and around a 15 min interval - if (Math.abs(curTS - execTS) < 15 * 60 * 1000) { - return false; - } - } - } - _usageDao.expungeAllOlderThan(interval, 
ConfigurationManagerImpl.DELETE_QUERY_BATCH_SIZE.value()); - } else { - throw new InvalidParameterValueException("Invalid interval value. Interval to remove cloud_usage records should be greater than 0"); + if (interval == null || interval <= 0) { + throw new InvalidParameterValueException("Interval should be greater than 0."); } + + String jobExecTime = _configDao.getValue(Config.UsageStatsJobExecTime.toString()); + Date previousJobExecTime = UsageUtils.getPreviousJobExecutionTime(usageExecutionTimeZone, jobExecTime); + Date nextJobExecTime = UsageUtils.getNextJobExecutionTime(usageExecutionTimeZone, jobExecTime); + if (ObjectUtils.allNotNull(previousJobExecTime, nextJobExecTime)) { + logger.debug("Next Usage job is scheduled to execute at [{}]; previous execution was at [{}].", + DateUtil.displayDateInTimezone(usageExecutionTimeZone, nextJobExecTime), DateUtil.displayDateInTimezone(usageExecutionTimeZone, previousJobExecTime)); + Date now = new Date(); + if (nextJobExecTime.getTime() - now.getTime() < REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS) { + logger.info("Not removing any cloud_usage records because the next Usage job is scheduled to execute in less than {} minute(s).", REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS / 60000); + return false; + } else if (now.getTime() - previousJobExecTime.getTime() < REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS) { + logger.info("Not removing any cloud_usage records because the last Usage job executed in less than {} minute(s) ago.", REMOVE_RAW_USAGE_RECORDS_WINDOW_IN_MS / 60000); + return false; + } + } + + logger.info("Removing cloud_usage records older than {} day(s).", interval); + _usageDao.expungeAllOlderThan(interval, ConfigurationManagerImpl.DELETE_QUERY_BATCH_SIZE.value()); return true; } } diff --git a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java index 30cdfcf21f0..9da64889fc3 100644 --- a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java +++ b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java @@ -198,7 +198,9 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna private Future _heartbeat = null; private Future _sanity = null; private boolean usageSnapshotSelection = false; + private static TimeZone usageAggregationTimeZone = TimeZone.getTimeZone("GMT"); + private static TimeZone usageExecutionTimeZone = TimeZone.getTimeZone("GMT"); public UsageManagerImpl() { } @@ -253,6 +255,9 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna if (aggregationTimeZone != null && !aggregationTimeZone.isEmpty()) { usageAggregationTimeZone = TimeZone.getTimeZone(aggregationTimeZone); } + if (execTimeZone != null) { + usageExecutionTimeZone = TimeZone.getTimeZone(execTimeZone); + } try { if ((execTime == null) || (aggregationRange == null)) { @@ -261,34 +266,18 @@ public class UsageManagerImpl extends ManagerBase implements UsageManager, Runna throw new ConfigurationException("Missing configuration values for usage job, usage.stats.job.exec.time = " + execTime + ", usage.stats.job.aggregation.range = " + aggregationRange); } - String[] execTimeSegments = execTime.split(":"); - if (execTimeSegments.length != 2) { - logger.error("Unable to parse usage.stats.job.exec.time"); - throw new ConfigurationException("Unable to parse usage.stats.job.exec.time '" + execTime + "'"); - } - int hourOfDay = Integer.parseInt(execTimeSegments[0]); - int minutes = Integer.parseInt(execTimeSegments[1]); - - Date currentDate = new Date(); - 
_jobExecTime.setTime(currentDate); - - _jobExecTime.set(Calendar.HOUR_OF_DAY, hourOfDay); - _jobExecTime.set(Calendar.MINUTE, minutes); - _jobExecTime.set(Calendar.SECOND, 0); - _jobExecTime.set(Calendar.MILLISECOND, 0); - - TimeZone jobExecTimeZone = execTimeZone != null ? TimeZone.getTimeZone(execTimeZone) : Calendar.getInstance().getTimeZone(); - _jobExecTime.setTimeZone(jobExecTimeZone); - - // if the hour to execute the job has already passed, roll the day forward to the next day - if (_jobExecTime.getTime().before(currentDate)) { - _jobExecTime.roll(Calendar.DAY_OF_YEAR, true); + + Date nextJobExecTime = UsageUtils.getNextJobExecutionTime(usageExecutionTimeZone, execTime); + if (nextJobExecTime == null) { + throw new ConfigurationException(String.format("Unable to parse configuration 'usage.stats.job.exec.time' value [%s].", execTime)); } + _jobExecTime.setTimeZone(usageExecutionTimeZone); + _jobExecTime.setTime(nextJobExecTime); logger.info("Usage is configured to execute in time zone [{}], at [{}], each [{}] minutes; the current time in that timezone is [{}] and the " + "next job is scheduled to execute at [{}]. During its execution, Usage will aggregate stats according to the time zone [{}] defined in global setting [usage.aggregation.timezone].", - jobExecTimeZone.getID(), execTime, aggregationRange, DateUtil.displayDateInTimezone(jobExecTimeZone, currentDate), - DateUtil.displayDateInTimezone(jobExecTimeZone, _jobExecTime.getTime()), usageAggregationTimeZone.getID()); + usageExecutionTimeZone.getID(), execTime, aggregationRange, DateUtil.displayDateInTimezone(usageExecutionTimeZone, new Date()), + DateUtil.displayDateInTimezone(usageExecutionTimeZone, _jobExecTime.getTime()), usageAggregationTimeZone.getID()); _aggregationDuration = Integer.parseInt(aggregationRange); if (_aggregationDuration < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { diff --git a/utils/src/main/java/org/apache/cloudstack/utils/usage/UsageUtils.java b/utils/src/main/java/org/apache/cloudstack/utils/usage/UsageUtils.java index a97aed15d36..861788d1918 100644 --- a/utils/src/main/java/org/apache/cloudstack/utils/usage/UsageUtils.java +++ b/utils/src/main/java/org/apache/cloudstack/utils/usage/UsageUtils.java @@ -19,6 +19,57 @@ package org.apache.cloudstack.utils.usage; +import com.cloud.utils.DateUtil; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.Calendar; +import java.util.Date; +import java.util.TimeZone; + public class UsageUtils { + protected static Logger logger = LogManager.getLogger(UsageUtils.class); + public static final int USAGE_AGGREGATION_RANGE_MIN = 1; + + public static Date getNextJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig) { + return getJobExecutionTime(usageTimeZone, jobExecTimeConfig, true); + } + + public static Date getPreviousJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig) { + return getJobExecutionTime(usageTimeZone, jobExecTimeConfig, false); + } + + protected static Date getJobExecutionTime(TimeZone usageTimeZone, String jobExecTimeConfig, boolean next) { + String[] execTimeSegments = jobExecTimeConfig.split(":"); + if (execTimeSegments.length != 2) { + logger.warn("Unable to parse configuration 'usage.stats.job.exec.time'."); + return null; + } + int hourOfDay; + int minutes; + try { + hourOfDay = Integer.parseInt(execTimeSegments[0]); + minutes = Integer.parseInt(execTimeSegments[1]); + } catch (NumberFormatException e) { + logger.warn("Unable to parse configuration 
'usage.stats.job.exec.time' due to non-numeric values in [{}].", jobExecTimeConfig, e); + return null; + } + + Date currentDate = DateUtil.currentGMTTime(); + Calendar jobExecTime = Calendar.getInstance(usageTimeZone); + jobExecTime.setTime(currentDate); + jobExecTime.set(Calendar.HOUR_OF_DAY, hourOfDay); + jobExecTime.set(Calendar.MINUTE, minutes); + jobExecTime.set(Calendar.SECOND, 0); + jobExecTime.set(Calendar.MILLISECOND, 0); + + if (next && jobExecTime.getTime().before(currentDate)) { + jobExecTime.add(Calendar.DAY_OF_YEAR, 1); + } else if (!next && jobExecTime.getTime().after(currentDate)) { + jobExecTime.add(Calendar.DAY_OF_YEAR, -1); + } + + return jobExecTime.getTime(); + } } diff --git a/utils/src/test/java/org/apache/cloudstack/utils/usage/UsageUtilsTest.java b/utils/src/test/java/org/apache/cloudstack/utils/usage/UsageUtilsTest.java new file mode 100644 index 00000000000..8b9b4910e39 --- /dev/null +++ b/utils/src/test/java/org/apache/cloudstack/utils/usage/UsageUtilsTest.java @@ -0,0 +1,135 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.utils.usage; + +import com.cloud.utils.DateUtil; +import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.Date; +import java.util.TimeZone; + +@RunWith(MockitoJUnitRunner.class) +public class UsageUtilsTest extends TestCase { + + TimeZone usageTimeZone = TimeZone.getTimeZone("GMT-3"); + + @Test + public void getJobExecutionTimeTestReturnsNullWhenConfigurationValueIsInvalid() { + Date result = UsageUtils.getNextJobExecutionTime(usageTimeZone, "test"); + assertNull(result); + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsTrueAndExecutionTimeHasNotPassed() { + Date currentDate = new Date(); + currentDate.setTime(1724296800000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", true); + + Assert.assertNotNull(result); + Assert.assertEquals(1724297400000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsTrueAndExecutionTimeHasPassed() { + Date currentDate = new Date(); + currentDate.setTime(1724297460000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", true); + + Assert.assertNotNull(result); + Assert.assertEquals(1724383800000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsFalseAndExecutionTimeHasNotPassed() { + Date currentDate = new Date(); + currentDate.setTime(1724296800000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", false); + + Assert.assertNotNull(result); + Assert.assertEquals(1724211000000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextIsFalseAndExecutionTimeHasPassed() { + Date currentDate = new Date(); + currentDate.setTime(1724297460000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:30", false); + + Assert.assertNotNull(result); + Assert.assertEquals(1724297400000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenNextExecutionIsOnNextYear() { + Date currentDate = new Date(); + currentDate.setTime(1767236340000L); + + try (MockedStatic dateUtilMockedStatic = Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "00:00", true); + + Assert.assertNotNull(result); + Assert.assertEquals(1767236400000L, result.getTime()); + } + } + + @Test + public void getJobExecutionTimeTestReturnsExpectedDateWhenPreviousExecutionWasOnPreviousYear() { + Date currentDate = new Date(); + currentDate.setTime(1767236460000L); + + try (MockedStatic dateUtilMockedStatic = 
Mockito.mockStatic(DateUtil.class)) { + dateUtilMockedStatic.when(DateUtil::currentGMTTime).thenReturn(currentDate); + + Date result = UsageUtils.getJobExecutionTime(usageTimeZone, "23:59", false); + + Assert.assertNotNull(result); + Assert.assertEquals(1767236340000L, result.getTime()); + } + } + +} From 0e94f7d7717857884bd2568bd7917faf10cd6e48 Mon Sep 17 00:00:00 2001 From: Vishesh <8760112+vishesh92@users.noreply.github.com> Date: Thu, 29 Jan 2026 19:47:11 +0530 Subject: [PATCH 107/126] UI: Show applied search filters (#9520) --- ui/public/config.json | 3 +- ui/src/components/view/SearchFilter.vue | 559 ++++++++++++++++++++++++ ui/src/views/AutogenView.vue | 48 ++ ui/tests/unit/views/AutogenView.spec.js | 4 + 4 files changed, 613 insertions(+), 1 deletion(-) create mode 100644 ui/src/components/view/SearchFilter.vue diff --git a/ui/public/config.json b/ui/public/config.json index 64d10284186..e3d1d30b95f 100644 --- a/ui/public/config.json +++ b/ui/public/config.json @@ -98,5 +98,6 @@ "multipleServer": false, "allowSettingTheme": true, "docHelpMappings": {}, - "notifyLatestCSVersion": true + "notifyLatestCSVersion": true, + "showSearchFilters": true } diff --git a/ui/src/components/view/SearchFilter.vue b/ui/src/components/view/SearchFilter.vue new file mode 100644 index 00000000000..ed950c094a3 --- /dev/null +++ b/ui/src/components/view/SearchFilter.vue @@ -0,0 +1,559 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ + + + diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index a01e300c1c9..6acc81d6a02 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -106,6 +106,16 @@ @change-filter="changeFilter"/> + + + @@ -469,6 +479,7 @@ import ListView from '@/components/view/ListView' import ResourceView from '@/components/view/ResourceView' import ActionButton from '@/components/view/ActionButton' import SearchView from '@/components/view/SearchView' +import SearchFilter from '@/components/view/SearchFilter' import OsLogo from '@/components/widgets/OsLogo' import ResourceIcon from '@/components/view/ResourceIcon' import BulkActionProgress from '@/components/view/BulkActionProgress' @@ -482,6 +493,7 @@ export default { ListView, ActionButton, SearchView, + SearchFilter, BulkActionProgress, TooltipLabel, OsLogo, @@ -1126,6 +1138,42 @@ export default { eventBus.emit('action-closing', { action: this.currentAction }) this.closeAction() }, + getActiveFilters () { + const queryParams = Object.assign({}, this.$route.query) + const activeFilters = [] + for (const filter in queryParams) { + if (!filter.startsWith('tags[')) { + activeFilters.push({ + key: filter, + value: queryParams[filter], + isTag: false + }) + } else if (filter.endsWith('].key')) { + const tagIdx = filter.split('[')[1].split(']')[0] + const tagKey = queryParams[`tags[${tagIdx}].key`] + const tagValue = queryParams[`tags[${tagIdx}].value`] + activeFilters.push({ + key: tagKey, + value: tagValue, + isTag: true, + tagIdx: tagIdx + }) + } + } + return activeFilters + }, + removeFilter (filter) { + const queryParams = Object.assign({}, this.$route.query) + if (filter.isTag) { + delete queryParams[`tags[${filter.tagIdx}].key`] + delete queryParams[`tags[${filter.tagIdx}].value`] + } else { + delete queryParams[filter.key] + } + queryParams.page = '1' + queryParams.pagesize = String(this.pageSize) + this.$router.push({ query: queryParams }) + }, onRowSelectionChange (selection) { this.selectedRowKeys = selection if (selection?.length > 0) { diff --git a/ui/tests/unit/views/AutogenView.spec.js b/ui/tests/unit/views/AutogenView.spec.js index eb0352f99d1..05b565ab9c9 100644 --- a/ui/tests/unit/views/AutogenView.spec.js +++ b/ui/tests/unit/views/AutogenView.spec.js @@ -113,6 +113,10 @@ store = common.createMockStore(state, actions, mutations) i18n = common.createMockI18n('en', mockData.messages) mocks = { + $config: { + showSearchFilters: true, + docBase: 'http://docs.cloudstack.apache.org/en/latest' + }, $notifyError: jest.fn((error) => { return error }), From a6ccde44c4bc3c2b4623aca6b55500a722144763 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 29 Jan 2026 21:41:21 +0530 Subject: [PATCH 108/126] kvm: honour disk controller for vm during attach volume (#12452) --- .../resource/LibvirtComputingResource.java | 11 +- .../hypervisor/kvm/resource/LibvirtVMDef.java | 9 ++ .../kvm/storage/KVMStorageProcessor.java | 121 ++++++++++-------- 3 files changed, 82 insertions(+), 59 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index b66a838a3a5..c7b2747a777 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -4371,12 +4371,11 @@ public class 
LibvirtComputingResource extends ServerResourceBase implements Serv String dataDiskController = details.get(VmDetailConstants.DATA_DISK_CONTROLLER); if (StringUtils.isNotBlank(dataDiskController)) { - LOGGER.debug("Passed custom disk controller for DATA disk " + dataDiskController); - for (DiskDef.DiskBus bus : DiskDef.DiskBus.values()) { - if (bus.toString().equalsIgnoreCase(dataDiskController)) { - LOGGER.debug("Found matching enum for disk controller for DATA disk " + dataDiskController); - return bus; - } + LOGGER.debug("Passed custom disk controller for DATA disk {}", dataDiskController); + DiskDef.DiskBus bus = DiskDef.DiskBus.fromValue(dataDiskController); + if (bus != null) { + LOGGER.debug("Found matching enum for disk controller for DATA disk {}", dataDiskController); + return bus; } } return null; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index a38e7a02357..696e71bea80 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -686,6 +686,15 @@ public class LibvirtVMDef { _bus = bus; } + public static DiskBus fromValue(String bus) { + for (DiskBus b : DiskBus.values()) { + if (b.toString().equalsIgnoreCase(bus)) { + return b; + } + } + return null; + } + @Override public String toString() { return _bus; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 33bd41ee6ba..87ca531bb74 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -1320,26 +1320,27 @@ public class KVMStorageProcessor implements StorageProcessor { /** * Attaches or detaches a disk to an instance.
- * @param conn libvirt connection - * @param attach boolean that determines whether the device will be attached or detached - * @param vmName instance name - * @param attachingDisk kvm physical disk - * @param devId device id in instance + * @param conn libvirt connection + * @param attach boolean that determines whether the device will be attached or detached + * @param vmName instance name + * @param attachingDisk kvm physical disk + * @param devId device id in instance * @param serial - * @param bytesReadRate bytes read rate - * @param bytesReadRateMax bytes read rate max - * @param bytesReadRateMaxLength bytes read rate max length - * @param bytesWriteRate bytes write rate - * @param bytesWriteRateMax bytes write rate amx + * @param bytesReadRate bytes read rate + * @param bytesReadRateMax bytes read rate max + * @param bytesReadRateMaxLength bytes read rate max length + * @param bytesWriteRate bytes write rate + * @param bytesWriteRateMax bytes write rate amx * @param bytesWriteRateMaxLength bytes write rate max length - * @param iopsReadRate iops read rate - * @param iopsReadRateMax iops read rate max - * @param iopsReadRateMaxLength iops read rate max length - * @param iopsWriteRate iops write rate - * @param iopsWriteRateMax iops write rate max - * @param iopsWriteRateMaxLength iops write rate max length - * @param cacheMode cache mode - * @param encryptDetails encrypt details + * @param iopsReadRate iops read rate + * @param iopsReadRateMax iops read rate max + * @param iopsReadRateMaxLength iops read rate max length + * @param iopsWriteRate iops write rate + * @param iopsWriteRateMax iops write rate max + * @param iopsWriteRateMaxLength iops write rate max length + * @param cacheMode cache mode + * @param encryptDetails encrypt details + * @param controllerInfo * @throws LibvirtException * @throws InternalErrorException */ @@ -1347,37 +1348,38 @@ public class KVMStorageProcessor implements StorageProcessor { final String serial, final Long bytesReadRate, final Long bytesReadRateMax, final Long bytesReadRateMaxLength, final Long bytesWriteRate, final Long bytesWriteRateMax, final Long bytesWriteRateMaxLength, final Long iopsReadRate, final Long iopsReadRateMax, final Long iopsReadRateMaxLength, final Long iopsWriteRate, final Long iopsWriteRateMax, - final Long iopsWriteRateMaxLength, final String cacheMode, final DiskDef.LibvirtDiskEncryptDetails encryptDetails, Map details) + final Long iopsWriteRateMaxLength, final String cacheMode, final DiskDef.LibvirtDiskEncryptDetails encryptDetails, Map details, Map controllerInfo) throws LibvirtException, InternalErrorException { attachOrDetachDisk(conn, attach, vmName, attachingDisk, devId, serial, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength, bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, iopsWriteRate, - iopsWriteRateMax, iopsWriteRateMaxLength, cacheMode, encryptDetails, 0l, details); + iopsWriteRateMax, iopsWriteRateMaxLength, cacheMode, encryptDetails, 0l, details, controllerInfo); } /** * * Attaches or detaches a disk to an instance. 
- * @param conn libvirt connection - * @param attach boolean that determines whether the device will be attached or detached - * @param vmName instance name - * @param attachingDisk kvm physical disk - * @param devId device id in instance + * @param conn libvirt connection + * @param attach boolean that determines whether the device will be attached or detached + * @param vmName instance name + * @param attachingDisk kvm physical disk + * @param devId device id in instance * @param serial - * @param bytesReadRate bytes read rate - * @param bytesReadRateMax bytes read rate max - * @param bytesReadRateMaxLength bytes read rate max length - * @param bytesWriteRate bytes write rate - * @param bytesWriteRateMax bytes write rate amx + * @param bytesReadRate bytes read rate + * @param bytesReadRateMax bytes read rate max + * @param bytesReadRateMaxLength bytes read rate max length + * @param bytesWriteRate bytes write rate + * @param bytesWriteRateMax bytes write rate amx * @param bytesWriteRateMaxLength bytes write rate max length - * @param iopsReadRate iops read rate - * @param iopsReadRateMax iops read rate max - * @param iopsReadRateMaxLength iops read rate max length - * @param iopsWriteRate iops write rate - * @param iopsWriteRateMax iops write rate max - * @param iopsWriteRateMaxLength iops write rate max length - * @param cacheMode cache mode - * @param encryptDetails encrypt details - * @param waitDetachDevice value set in milliseconds to wait before assuming device removal failed + * @param iopsReadRate iops read rate + * @param iopsReadRateMax iops read rate max + * @param iopsReadRateMaxLength iops read rate max length + * @param iopsWriteRate iops write rate + * @param iopsWriteRateMax iops write rate max + * @param iopsWriteRateMaxLength iops write rate max length + * @param cacheMode cache mode + * @param encryptDetails encrypt details + * @param waitDetachDevice value set in milliseconds to wait before assuming device removal failed + * @param controllerInfo * @throws LibvirtException * @throws InternalErrorException */ @@ -1386,7 +1388,7 @@ public class KVMStorageProcessor implements StorageProcessor { final Long bytesWriteRate, final Long bytesWriteRateMax, final Long bytesWriteRateMaxLength, final Long iopsReadRate, final Long iopsReadRateMax, final Long iopsReadRateMaxLength, final Long iopsWriteRate, final Long iopsWriteRateMax, final Long iopsWriteRateMaxLength, final String cacheMode, final DiskDef.LibvirtDiskEncryptDetails encryptDetails, - long waitDetachDevice, Map details) + long waitDetachDevice, Map details, Map controllerInfo) throws LibvirtException, InternalErrorException { List disks = null; @@ -1423,17 +1425,7 @@ public class KVMStorageProcessor implements StorageProcessor { return; } } else { - DiskDef.DiskBus busT = DiskDef.DiskBus.VIRTIO; - for (final DiskDef disk : disks) { - if (disk.getDeviceType() == DeviceType.DISK) { - if (disk.getBusType() == DiskDef.DiskBus.SCSI) { - busT = DiskDef.DiskBus.SCSI; - } else if (disk.getBusType() == DiskDef.DiskBus.VIRTIOBLK) { - busT = DiskDef.DiskBus.VIRTIOBLK; - } - break; - } - } + DiskDef.DiskBus busT = getAttachDiskBusType(devId, disks, controllerInfo); diskdef = new DiskDef(); if (busT == DiskDef.DiskBus.SCSI || busT == DiskDef.DiskBus.VIRTIOBLK) { diskdef.setQemuDriver(true); @@ -1538,6 +1530,28 @@ public class KVMStorageProcessor implements StorageProcessor { } } + protected DiskDef.DiskBus getAttachDiskBusType(int deviceId, List disks, Map controllerInfo) { + String controllerKey = deviceId == 0 ? 
VmDetailConstants.ROOT_DISK_CONTROLLER : VmDetailConstants.DATA_DISK_CONTROLLER; + String diskController = MapUtils.getString(controllerInfo, controllerKey); + DiskDef.DiskBus busType = DiskDef.DiskBus.fromValue(diskController); + if (diskController != null) { + logger.debug("Using controller '{}' from command specified as {} while attaching disk (deviceId={})", + diskController, controllerKey, deviceId); + return busType; + } + for (final DiskDef disk : disks) { + if (disk.getDeviceType() != DeviceType.DISK) { + continue; + } + if (disk.getBusType() == DiskDef.DiskBus.SCSI) { + return DiskDef.DiskBus.SCSI; + } else if (disk.getBusType() == DiskDef.DiskBus.VIRTIOBLK) { + return DiskDef.DiskBus.VIRTIOBLK; + } + } + return DiskDef.DiskBus.VIRTIO; + } + @Override public Answer attachVolume(final AttachCommand cmd) { final DiskTO disk = cmd.getDisk(); @@ -1565,7 +1579,8 @@ public class KVMStorageProcessor implements StorageProcessor { vol.getBytesReadRate(), vol.getBytesReadRateMax(), vol.getBytesReadRateMaxLength(), vol.getBytesWriteRate(), vol.getBytesWriteRateMax(), vol.getBytesWriteRateMaxLength(), vol.getIopsReadRate(), vol.getIopsReadRateMax(), vol.getIopsReadRateMaxLength(), - vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, encryptDetails, disk.getDetails()); + vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, + encryptDetails, disk.getDetails(), cmd.getControllerInfo()); return new AttachAnswer(disk); } catch (final LibvirtException e) { @@ -1602,7 +1617,7 @@ public class KVMStorageProcessor implements StorageProcessor { vol.getBytesReadRate(), vol.getBytesReadRateMax(), vol.getBytesReadRateMaxLength(), vol.getBytesWriteRate(), vol.getBytesWriteRateMax(), vol.getBytesWriteRateMaxLength(), vol.getIopsReadRate(), vol.getIopsReadRateMax(), vol.getIopsReadRateMaxLength(), - vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, null, waitDetachDevice, null); + vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, null, waitDetachDevice, null, null); storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); From d7bdbcc4ed7a9b71ea7581c30b83e36ea3571a8e Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 30 Jan 2026 10:05:50 +0530 Subject: [PATCH 109/126] pre-commit: fix inconsistent license (#12551) Fix inconsistent license with UI .vue file. Signed-off-by: Abhishek Kumar --- ui/src/views/network/ImportNetworkACL.vue | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ui/src/views/network/ImportNetworkACL.vue b/ui/src/views/network/ImportNetworkACL.vue index 2456a75af45..cb0c4a404ef 100644 --- a/ui/src/views/network/ImportNetworkACL.vue +++ b/ui/src/views/network/ImportNetworkACL.vue @@ -1,17 +1,17 @@ // Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file +// or more contributor license agreements. See the NOTICE file // distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file +// regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at +// with the License. 
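[Editor's note] As a reading aid for the hunk above: the new getAttachDiskBusType helper prefers an explicit controller passed in the command, then falls back to the buses already attached to the instance, and only then to virtio. The sketch below restates that order under simplifying assumptions — plain strings stand in for DiskDef.DiskBus and VmDetailConstants, and the class and method names are invented for illustration, not CloudStack APIs.

    import java.util.List;
    import java.util.Map;

    public class BusTypeSketch {
        // Hypothetical stand-in for getAttachDiskBusType; strings replace DiskDef.DiskBus.
        static String pickBusType(int deviceId, List<String> attachedDiskBuses, Map<String, String> controllerInfo) {
            // 1. An explicit controller in the command wins: rootDiskController for the
            //    root disk (device id 0), dataDiskController for every other device id.
            String key = deviceId == 0 ? "rootDiskController" : "dataDiskController";
            String requested = controllerInfo == null ? null : controllerInfo.get(key);
            if (requested != null) {
                return requested;
            }
            // 2. Otherwise reuse scsi or virtio-blk when a disk already attached to the
            //    instance uses one of those buses.
            for (String bus : attachedDiskBuses) {
                if ("scsi".equals(bus) || "virtio-blk".equals(bus)) {
                    return bus;
                }
            }
            // 3. Default bus type.
            return "virtio";
        }

        public static void main(String[] args) {
            // A dataDiskController hint overrides whatever the existing disks use.
            System.out.println(pickBusType(1, List.of("ide"), Map.of("dataDiskController", "scsi"))); // scsi
            // Without a hint, the bus is inferred from the disks already attached.
            System.out.println(pickBusType(1, List.of("virtio-blk"), Map.of()));                      // virtio-blk
        }
    }

The explicit hint takes precedence so that a root/data disk controller detail supplied with the command is honoured even when the instance's existing disks use a different bus.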
You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. From bb8e7d39e6efc0a68f3536b607df6b5c2a01946e Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 30 Jan 2026 12:11:25 +0530 Subject: [PATCH 110/126] api,server: allow configuring repetitive alerts (#11325) * api,server: allow configuring repetitive alerts Fixes #6613 Introduces support for configuring additional alert types that can be published repeatedly, beyond the default set. Operators can now use the dynamic configuration `alert.allowed.repetitive.types` to specify a comma-separated list of alert type names that should be allowed for repetitive publication. Signed-off-by: Abhishek Kumar * add tests Signed-off-by: Abhishek Kumar * fix Signed-off-by: Abhishek Kumar * test fix Signed-off-by: Abhishek Kumar * allow repetition for custom alerts Signed-off-by: Abhishek Kumar * remove refactoring Signed-off-by: Abhishek Kumar --------- Signed-off-by: Abhishek Kumar --- .../apache/cloudstack/alert/AlertService.java | 44 +++++---- .../apache/cloudstack/api/ApiConstants.java | 1 + .../admin/resource/ListAlertTypesCmd.java | 12 ++- .../api/response/AlertTypeResponse.java | 12 ++- .../java/com/cloud/alert/AlertManager.java | 4 + .../com/cloud/alert/AlertManagerImpl.java | 72 +++++++++----- .../com/cloud/alert/AlertManagerImplTest.java | 98 +++++++++++++++++-- 7 files changed, 187 insertions(+), 56 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java index c051ebb2da2..fcc87908bd5 100644 --- a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java +++ b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java @@ -24,18 +24,24 @@ import com.cloud.exception.InvalidParameterValueException; public interface AlertService { public static class AlertType { - private static Set defaultAlertTypes = new HashSet(); + private static final Set defaultAlertTypes = new HashSet<>(); private final String name; private final short type; + private final boolean repetitionAllowed; - private AlertType(short type, String name, boolean isDefault) { + private AlertType(short type, String name, boolean isDefault, boolean repetitionAllowed) { this.name = name; this.type = type; + this.repetitionAllowed = repetitionAllowed; if (isDefault) { defaultAlertTypes.add(this); } } + private AlertType(short type, String name, boolean isDefault) { + this(type, name, isDefault, false); + } + public static final AlertType ALERT_TYPE_MEMORY = new AlertType(Capacity.CAPACITY_TYPE_MEMORY, "ALERT.MEMORY", true); public static final AlertType ALERT_TYPE_CPU = new AlertType(Capacity.CAPACITY_TYPE_CPU, "ALERT.CPU", true); public static final AlertType ALERT_TYPE_STORAGE = new AlertType(Capacity.CAPACITY_TYPE_STORAGE, "ALERT.STORAGE", true); @@ -45,36 +51,36 @@ public interface AlertService { public static final AlertType ALERT_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET = new AlertType(Capacity.CAPACITY_TYPE_VIRTUAL_NETWORK_IPV6_SUBNET, "ALERT.NETWORK.IPV6SUBNET", true); public static final AlertType 
ALERT_TYPE_PRIVATE_IP = new AlertType(Capacity.CAPACITY_TYPE_PRIVATE_IP, "ALERT.NETWORK.PRIVATEIP", true); public static final AlertType ALERT_TYPE_SECONDARY_STORAGE = new AlertType(Capacity.CAPACITY_TYPE_SECONDARY_STORAGE, "ALERT.STORAGE.SECONDARY", true); - public static final AlertType ALERT_TYPE_HOST = new AlertType((short)7, "ALERT.COMPUTE.HOST", true); - public static final AlertType ALERT_TYPE_USERVM = new AlertType((short)8, "ALERT.USERVM", true); - public static final AlertType ALERT_TYPE_DOMAIN_ROUTER = new AlertType((short)9, "ALERT.SERVICE.DOMAINROUTER", true); - public static final AlertType ALERT_TYPE_CONSOLE_PROXY = new AlertType((short)10, "ALERT.SERVICE.CONSOLEPROXY", true); + public static final AlertType ALERT_TYPE_HOST = new AlertType((short)7, "ALERT.COMPUTE.HOST", true, true); + public static final AlertType ALERT_TYPE_USERVM = new AlertType((short)8, "ALERT.USERVM", true, true); + public static final AlertType ALERT_TYPE_DOMAIN_ROUTER = new AlertType((short)9, "ALERT.SERVICE.DOMAINROUTER", true, true); + public static final AlertType ALERT_TYPE_CONSOLE_PROXY = new AlertType((short)10, "ALERT.SERVICE.CONSOLEPROXY", true, true); public static final AlertType ALERT_TYPE_ROUTING = new AlertType((short)11, "ALERT.NETWORK.ROUTING", true); - public static final AlertType ALERT_TYPE_STORAGE_MISC = new AlertType((short)12, "ALERT.STORAGE.MISC", true); + public static final AlertType ALERT_TYPE_STORAGE_MISC = new AlertType((short)12, "ALERT.STORAGE.MISC", true, true); public static final AlertType ALERT_TYPE_USAGE_SERVER = new AlertType((short)13, "ALERT.USAGE", true); - public static final AlertType ALERT_TYPE_MANAGEMENT_NODE = new AlertType((short)14, "ALERT.MANAGEMENT", true); + public static final AlertType ALERT_TYPE_MANAGEMENT_NODE = new AlertType((short)14, "ALERT.MANAGEMENT", true, true); public static final AlertType ALERT_TYPE_DOMAIN_ROUTER_MIGRATE = new AlertType((short)15, "ALERT.NETWORK.DOMAINROUTERMIGRATE", true); public static final AlertType ALERT_TYPE_CONSOLE_PROXY_MIGRATE = new AlertType((short)16, "ALERT.SERVICE.CONSOLEPROXYMIGRATE", true); public static final AlertType ALERT_TYPE_USERVM_MIGRATE = new AlertType((short)17, "ALERT.USERVM.MIGRATE", true); public static final AlertType ALERT_TYPE_VLAN = new AlertType((short)18, "ALERT.NETWORK.VLAN", true); - public static final AlertType ALERT_TYPE_SSVM = new AlertType((short)19, "ALERT.SERVICE.SSVM", true); + public static final AlertType ALERT_TYPE_SSVM = new AlertType((short)19, "ALERT.SERVICE.SSVM", true, true); public static final AlertType ALERT_TYPE_USAGE_SERVER_RESULT = new AlertType((short)20, "ALERT.USAGE.RESULT", true); public static final AlertType ALERT_TYPE_STORAGE_DELETE = new AlertType((short)21, "ALERT.STORAGE.DELETE", true); public static final AlertType ALERT_TYPE_UPDATE_RESOURCE_COUNT = new AlertType((short)22, "ALERT.RESOURCE.COUNT", true); public static final AlertType ALERT_TYPE_USAGE_SANITY_RESULT = new AlertType((short)23, "ALERT.USAGE.SANITY", true); public static final AlertType ALERT_TYPE_DIRECT_ATTACHED_PUBLIC_IP = new AlertType((short)24, "ALERT.NETWORK.DIRECTPUBLICIP", true); public static final AlertType ALERT_TYPE_LOCAL_STORAGE = new AlertType((short)25, "ALERT.STORAGE.LOCAL", true); - public static final AlertType ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED = new AlertType((short)26, "ALERT.RESOURCE.EXCEED", true); + public static final AlertType ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED = new AlertType((short)26, "ALERT.RESOURCE.EXCEED", true, true); public static final AlertType 
ALERT_TYPE_SYNC = new AlertType((short)27, "ALERT.TYPE.SYNC", true); - public static final AlertType ALERT_TYPE_UPLOAD_FAILED = new AlertType((short)28, "ALERT.UPLOAD.FAILED", true); - public static final AlertType ALERT_TYPE_OOBM_AUTH_ERROR = new AlertType((short)29, "ALERT.OOBM.AUTHERROR", true); - public static final AlertType ALERT_TYPE_HA_ACTION = new AlertType((short)30, "ALERT.HA.ACTION", true); - public static final AlertType ALERT_TYPE_CA_CERT = new AlertType((short)31, "ALERT.CA.CERT", true); + public static final AlertType ALERT_TYPE_UPLOAD_FAILED = new AlertType((short)28, "ALERT.UPLOAD.FAILED", true, true); + public static final AlertType ALERT_TYPE_OOBM_AUTH_ERROR = new AlertType((short)29, "ALERT.OOBM.AUTHERROR", true, true); + public static final AlertType ALERT_TYPE_HA_ACTION = new AlertType((short)30, "ALERT.HA.ACTION", true, true); + public static final AlertType ALERT_TYPE_CA_CERT = new AlertType((short)31, "ALERT.CA.CERT", true, true); public static final AlertType ALERT_TYPE_VM_SNAPSHOT = new AlertType((short)32, "ALERT.VM.SNAPSHOT", true); public static final AlertType ALERT_TYPE_VR_PUBLIC_IFACE_MTU = new AlertType((short)33, "ALERT.VR.PUBLIC.IFACE.MTU", true); public static final AlertType ALERT_TYPE_VR_PRIVATE_IFACE_MTU = new AlertType((short)34, "ALERT.VR.PRIVATE.IFACE.MTU", true); - public static final AlertType ALERT_TYPE_EXTENSION_PATH_NOT_READY = new AlertType((short)33, "ALERT.TYPE.EXTENSION.PATH.NOT.READY", true); - public static final AlertType ALERT_TYPE_VPN_GATEWAY_OBSOLETE_PARAMETERS = new AlertType((short)34, "ALERT.S2S.VPN.GATEWAY.OBSOLETE.PARAMETERS", true); + public static final AlertType ALERT_TYPE_EXTENSION_PATH_NOT_READY = new AlertType((short)33, "ALERT.TYPE.EXTENSION.PATH.NOT.READY", true, true); + public static final AlertType ALERT_TYPE_VPN_GATEWAY_OBSOLETE_PARAMETERS = new AlertType((short)34, "ALERT.S2S.VPN.GATEWAY.OBSOLETE.PARAMETERS", true, true); public static final AlertType ALERT_TYPE_BACKUP_STORAGE = new AlertType(Capacity.CAPACITY_TYPE_BACKUP_STORAGE, "ALERT.STORAGE.BACKUP", true); public static final AlertType ALERT_TYPE_OBJECT_STORAGE = new AlertType(Capacity.CAPACITY_TYPE_OBJECT_STORAGE, "ALERT.STORAGE.OBJECT", true); @@ -86,6 +92,10 @@ public interface AlertService { return name; } + public boolean isRepetitionAllowed() { + return repetitionAllowed; + } + private static AlertType getAlertType(short type) { for (AlertType alertType : defaultAlertTypes) { if (alertType.getType() == type) { @@ -109,7 +119,7 @@ public interface AlertService { if (defaultAlert != null && !defaultAlert.getName().equalsIgnoreCase(name)) { throw new InvalidParameterValueException("There is a default alert having type " + type + " and name " + defaultAlert.getName()); } else { - return new AlertType(type, name, false); + return new AlertType(type, name, false, false); } } } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index d00e339de2f..790070e5244 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -503,6 +503,7 @@ public class ApiConstants { public static final String RECONNECT = "reconnect"; public static final String RECOVER = "recover"; public static final String REPAIR = "repair"; + public static final String REPETITION_ALLOWED = "repetitionallowed"; public static final String REQUIRES_HVM = "requireshvm"; public static final String RESOURCES = "resources"; public 
static final String RESOURCE_COUNT = "resourcecount"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListAlertTypesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListAlertTypesCmd.java index e7bfbdbc625..dcd4f2c89ef 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListAlertTypesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListAlertTypesCmd.java @@ -16,7 +16,10 @@ // under the License. package org.apache.cloudstack.api.command.admin.resource; -import com.cloud.user.Account; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; @@ -24,9 +27,7 @@ import org.apache.cloudstack.api.response.AlertResponse; import org.apache.cloudstack.api.response.AlertTypeResponse; import org.apache.cloudstack.api.response.ListResponse; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; +import com.cloud.user.Account; @APICommand(name = "listAlertTypes", description = "Lists all alerts types", responseObject = AlertResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) @@ -43,7 +44,8 @@ public class ListAlertTypesCmd extends BaseCmd { ListResponse response = new ListResponse<>(); List typeResponseList = new ArrayList<>(); for (AlertService.AlertType alertType : result) { - AlertTypeResponse alertResponse = new AlertTypeResponse(alertType.getType(), alertType.getName()); + AlertTypeResponse alertResponse = new AlertTypeResponse(alertType.getType(), alertType.getName(), + alertType.isRepetitionAllowed()); alertResponse.setObjectName("alerttype"); typeResponseList.add(alertResponse); } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/AlertTypeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/AlertTypeResponse.java index 3f91cde0178..e8c3cf6c4ac 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/AlertTypeResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/AlertTypeResponse.java @@ -16,11 +16,12 @@ // under the License. 
package org.apache.cloudstack.api.response; -import com.cloud.serializer.Param; -import com.google.gson.annotations.SerializedName; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + public class AlertTypeResponse extends BaseResponse { @SerializedName("alerttypeid") @@ -31,6 +32,10 @@ public class AlertTypeResponse extends BaseResponse { @Param(description = "description of alert type") private String name; + @SerializedName(ApiConstants.REPETITION_ALLOWED) + @Param(description = "Whether repetitive alerts allowed for the alert type", since = "4.22.0") + private boolean repetitionAllowed = true; + public String getName() { return name; } @@ -47,9 +52,10 @@ public class AlertTypeResponse extends BaseResponse { this.alertType = alertType; } - public AlertTypeResponse(short alertType, String name) { + public AlertTypeResponse(short alertType, String name, boolean repetitionAllowed) { this.alertType = alertType; this.name = name; + this.repetitionAllowed = repetitionAllowed; setObjectName("alerttype"); } } diff --git a/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java b/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java index 7fe19c3ba9f..46993b066a4 100644 --- a/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java +++ b/engine/components-api/src/main/java/com/cloud/alert/AlertManager.java @@ -49,6 +49,10 @@ public interface AlertManager extends Manager, AlertService { "Percentage (as a value between 0 and 1) of guest network IPv6 subnet utilization above which alerts will be sent.", true); + ConfigKey AllowedRepetitiveAlertTypes = new ConfigKey<>(ConfigKey.CATEGORY_ALERT, String.class, + "alert.allowed.repetitive.types", "", + "Comma-separated list of alert types (by name) that can be sent multiple times", true); + void clearAlert(AlertType alertType, long dataCenterId, long podId); void recalculateCapacity(); diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index 308c4443cb8..27b445ba376 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -19,7 +19,6 @@ package com.cloud.alert; import java.io.UnsupportedEncodingException; import java.text.DecimalFormat; import java.util.ArrayList; -import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -37,15 +36,12 @@ import javax.inject.Inject; import javax.mail.MessagingException; import javax.naming.ConfigurationException; -import com.cloud.dc.DataCenter; -import com.cloud.dc.Pod; -import com.cloud.org.Cluster; - import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -54,6 +50,7 @@ import org.apache.cloudstack.utils.mailing.MailAddress; import org.apache.cloudstack.utils.mailing.SMTPMailProperties; import 
org.apache.cloudstack.utils.mailing.SMTPMailSender; import org.apache.commons.lang3.ArrayUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.math.NumberUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -70,9 +67,11 @@ import com.cloud.capacity.dao.CapacityDaoImpl.SummedCapacity; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; +import com.cloud.dc.Pod; import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; @@ -86,10 +85,12 @@ import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.network.Ipv6Service; import com.cloud.network.dao.IPAddressDao; +import com.cloud.org.Cluster; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceManager; import com.cloud.storage.StorageManager; import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.SearchCriteria; @@ -100,21 +101,6 @@ import com.cloud.utils.db.TransactionStatus; public class AlertManagerImpl extends ManagerBase implements AlertManager, Configurable { protected Logger logger = LogManager.getLogger(AlertManagerImpl.class.getName()); - public static final List ALERTS = Arrays.asList(AlertType.ALERT_TYPE_HOST - , AlertType.ALERT_TYPE_USERVM - , AlertType.ALERT_TYPE_DOMAIN_ROUTER - , AlertType.ALERT_TYPE_CONSOLE_PROXY - , AlertType.ALERT_TYPE_SSVM - , AlertType.ALERT_TYPE_STORAGE_MISC - , AlertType.ALERT_TYPE_MANAGEMENT_NODE - , AlertType.ALERT_TYPE_RESOURCE_LIMIT_EXCEEDED - , AlertType.ALERT_TYPE_UPLOAD_FAILED - , AlertType.ALERT_TYPE_OOBM_AUTH_ERROR - , AlertType.ALERT_TYPE_HA_ACTION - , AlertType.ALERT_TYPE_CA_CERT - , AlertType.ALERT_TYPE_EXTENSION_PATH_NOT_READY - , AlertType.ALERT_TYPE_VPN_GATEWAY_OBSOLETE_PARAMETERS); - private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // Thirty seconds expressed in milliseconds. private static final DecimalFormat DfPct = new DecimalFormat("###.##"); @@ -156,6 +142,8 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi Ipv6Service ipv6Service; @Inject HostDao hostDao; + @Inject + MessageBus messageBus; private Timer _timer = null; private long _capacityCheckPeriod = 60L * 60L * 1000L; // One hour by default. 
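[Editor's note] For operators, the commit message above comes down to one new dynamic setting: alert.allowed.repetitive.types holds a comma-separated list of alert type names that may be published more than once. The snippet below is a self-contained sketch of how such a value is interpreted under this patch — the RepetitiveAlertFilter class and its method names are invented for illustration; in the patch itself the parsing lives in AlertManagerImpl.setupRepetitiveAlertTypes and the check in getAlertForTrivialAlertType (both in the hunks below).

    import java.util.HashSet;
    import java.util.Set;

    public class RepetitiveAlertFilter {
        private final Set<String> allowedNames = new HashSet<>();

        // Mirrors the parsing in the hunks below: split on commas, skip blank entries,
        // compare case-insensitively.
        void reload(String configuredValue) {
            allowedNames.clear();
            if (configuredValue == null || configuredValue.isBlank()) {
                return;
            }
            for (String name : configuredValue.split(",")) {
                if (!name.isBlank()) {
                    allowedNames.add(name.toLowerCase());
                }
            }
        }

        // An alert type may repeat when it is flagged repetitionAllowed by default or
        // when the operator listed its name in the setting.
        boolean repetitionAllowed(String alertTypeName, boolean allowedByDefault) {
            return allowedByDefault
                    || (alertTypeName != null && allowedNames.contains(alertTypeName.toLowerCase()));
        }

        public static void main(String[] args) {
            RepetitiveAlertFilter filter = new RepetitiveAlertFilter();
            filter.reload("ALERT.USAGE,ALERT.NETWORK.VLAN");
            System.out.println(filter.repetitionAllowed("ALERT.USAGE", false));         // true
            System.out.println(filter.repetitionAllowed("ALERT.STORAGE.LOCAL", false)); // false
        }
    }

Because AlertManagerImpl also subscribes to the configuration-edit event for this key (initMessageBusListener in the diff below), the list is re-read whenever the setting changes, with no management-server restart required.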
@@ -175,6 +163,8 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi protected String[] recipients = null; protected String senderAddress = null; + private final List allowedRepetitiveAlertTypeNames = new ArrayList<>(); + public AlertManagerImpl() { _executor = Executors.newCachedThreadPool(new NamedThreadFactory("Email-Alerts-Sender")); } @@ -254,12 +244,32 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi _capacityCheckPeriod = Long.parseLong(Config.CapacityCheckPeriod.getDefaultValue()); } } + initMessageBusListener(); + setupRepetitiveAlertTypes(); _timer = new Timer("CapacityChecker"); return true; } + protected void setupRepetitiveAlertTypes() { + allowedRepetitiveAlertTypeNames.clear(); + String allowedRepetitiveAlertsStr = AllowedRepetitiveAlertTypes.value(); + logger.trace("Allowed repetitive alert types specified by {}: {} ", AllowedRepetitiveAlertTypes.key(), + allowedRepetitiveAlertsStr); + if (StringUtils.isBlank(allowedRepetitiveAlertsStr)) { + return; + } + String[] allowedRepetitiveAlertTypesArray = allowedRepetitiveAlertsStr.split(","); + for (String allowedTypeName : allowedRepetitiveAlertTypesArray) { + if (StringUtils.isBlank(allowedTypeName)) { + continue; + } + allowedRepetitiveAlertTypeNames.add(allowedTypeName.toLowerCase()); + } + logger.trace("{} alert types specified for repetitive alerts", allowedRepetitiveAlertTypeNames.size()); + } + @Override public boolean start() { _timer.schedule(new CapacityChecker(), INITIAL_CAPACITY_CHECK_DELAY, _capacityCheckPeriod); @@ -850,11 +860,11 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi @Nullable private AlertVO getAlertForTrivialAlertType(AlertType alertType, long dataCenterId, Long podId, Long clusterId) { - AlertVO alert = null; - if (!ALERTS.contains(alertType)) { - alert = _alertDao.getLastAlert(alertType.getType(), dataCenterId, podId, clusterId); + if (alertType.isRepetitionAllowed() || (StringUtils.isNotBlank(alertType.getName()) && + allowedRepetitiveAlertTypeNames.contains(alertType.getName().toLowerCase()))) { + return null; } - return alert; + return _alertDao.getLastAlert(alertType.getType(), dataCenterId, podId, clusterId); } protected void sendMessage(SMTPMailProperties mailProps) { @@ -883,7 +893,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] {CPUCapacityThreshold, MemoryCapacityThreshold, StorageAllocatedCapacityThreshold, StorageCapacityThreshold, AlertSmtpEnabledSecurityProtocols, - AlertSmtpUseStartTLS, Ipv6SubnetCapacityThreshold, AlertSmtpUseAuth}; + AlertSmtpUseStartTLS, Ipv6SubnetCapacityThreshold, AlertSmtpUseAuth, AllowedRepetitiveAlertTypes}; } @Override @@ -897,4 +907,16 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi return false; } } + + @SuppressWarnings("unchecked") + protected void initMessageBusListener() { + messageBus.subscribe(EventTypes.EVENT_CONFIGURATION_VALUE_EDIT, (senderAddress, subject, args) -> { + Ternary updatedSetting = (Ternary) args; + String updatedSettingName = updatedSetting.first(); + if (!AllowedRepetitiveAlertTypes.key().equals(updatedSettingName)) { + return; + } + setupRepetitiveAlertTypes(); + }); + } } diff --git a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java index 170fceae986..b5932e8a071 100644 --- 
a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java +++ b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java @@ -16,6 +16,12 @@ // under the License. package com.cloud.alert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; + import java.io.UnsupportedEncodingException; import java.util.HashMap; import java.util.List; @@ -25,7 +31,10 @@ import javax.mail.MessagingException; import javax.naming.ConfigurationException; import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.framework.messagebus.MessageSubscriber; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.mailing.SMTPMailSender; @@ -40,6 +49,7 @@ import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; import com.cloud.alert.dao.AlertDao; import com.cloud.capacity.Capacity; @@ -52,16 +62,12 @@ import com.cloud.dc.HostPodVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; +import com.cloud.event.EventTypes; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.storage.StorageManager; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.verify; +import com.cloud.utils.Ternary; @RunWith(MockitoJUnitRunner.class) public class AlertManagerImplTest { @@ -112,6 +118,9 @@ public class AlertManagerImplTest { @Mock ConfigurationDao configDao; + @Mock + MessageBus messageBus; + private final String[] recipients = new String[]{"test@test.com"}; private final String senderAddress = "sender@test.com"; @@ -268,4 +277,81 @@ public class AlertManagerImplTest { assertEquals("Available backup storage space is low, total: 200.0 MB, used: 180.0 MB (90%)", capturedAlert.getContent()); assertEquals(AlertManager.AlertType.ALERT_TYPE_BACKUP_STORAGE.getType(), capturedAlert.getType()); } + + @Test + public void initMessageBusListenerSubscribesToConfigurationEditEvent() { + MessageBus messageBusMock = Mockito.mock(MessageBus.class); + alertManagerImplMock.messageBus = messageBusMock; + alertManagerImplMock.initMessageBusListener(); + Mockito.verify(messageBusMock).subscribe(Mockito.eq(EventTypes.EVENT_CONFIGURATION_VALUE_EDIT), Mockito.any()); + } + + @Test + public void initMessageBusListenerTriggersSetupRepetitiveAlertTypesOnAllowedKeyEdit() { + MessageBus messageBusMock = Mockito.mock(MessageBus.class); + alertManagerImplMock.messageBus = messageBusMock; + alertManagerImplMock.initMessageBusListener(); + ArgumentCaptor captor = ArgumentCaptor.forClass(MessageSubscriber.class); + Mockito.verify(messageBusMock).subscribe(Mockito.eq(EventTypes.EVENT_CONFIGURATION_VALUE_EDIT), captor.capture()); + Ternary args = new Ternary<>(AlertManager.AllowedRepetitiveAlertTypes.key(), ConfigKey.Scope.Global, 1L); + 
captor.getValue().onPublishMessage(null, null, args); + Mockito.verify(alertManagerImplMock).setupRepetitiveAlertTypes(); + } + + @Test + public void initMessageBusListenerDoesNotTriggerSetupRepetitiveAlertTypesOnOtherKeyEdit() { + MessageBus messageBusMock = Mockito.mock(MessageBus.class); + alertManagerImplMock.messageBus = messageBusMock; + alertManagerImplMock.initMessageBusListener(); + ArgumentCaptor captor = ArgumentCaptor.forClass(MessageSubscriber.class); + Mockito.verify(messageBusMock).subscribe(Mockito.eq(EventTypes.EVENT_CONFIGURATION_VALUE_EDIT), captor.capture()); + Ternary args = new Ternary<>("some.other.key", ConfigKey.Scope.Global, 1L); + captor.getValue().onPublishMessage(null, null, args); + Mockito.verify(alertManagerImplMock, Mockito.never()).setupRepetitiveAlertTypes(); + } + + private void mockAllowedRepetitiveAlertTypesConfigKey(String value) { + ReflectionTestUtils.setField(AlertManager.AllowedRepetitiveAlertTypes, "_defaultValue", value); + } + + @Test + public void setupRepetitiveAlertTypesParsesValidAlertTypesCorrectly() { + mockAllowedRepetitiveAlertTypesConfigKey(AlertManager.AlertType.ALERT_TYPE_CPU.getName() + "," + AlertManager.AlertType.ALERT_TYPE_MEMORY.getName()); + alertManagerImplMock.setupRepetitiveAlertTypes(); + List expectedTypes = (List)ReflectionTestUtils.getField(alertManagerImplMock, "allowedRepetitiveAlertTypeNames"); + Assert.assertNotNull(expectedTypes); + Assert.assertEquals(2, expectedTypes.size()); + Assert.assertTrue(expectedTypes.contains(AlertManager.AlertType.ALERT_TYPE_CPU.getName().toLowerCase())); + Assert.assertTrue(expectedTypes.contains(AlertManager.AlertType.ALERT_TYPE_MEMORY.getName().toLowerCase())); + } + + @Test + public void setupRepetitiveAlertTypesHandlesEmptyConfigValue() { + mockAllowedRepetitiveAlertTypesConfigKey(""); + alertManagerImplMock.setupRepetitiveAlertTypes(); + List expectedTypes = (List)ReflectionTestUtils.getField(alertManagerImplMock, "allowedRepetitiveAlertTypeNames"); + Assert.assertNotNull(expectedTypes); + Assert.assertTrue(expectedTypes.isEmpty()); + } + + @Test + public void setupRepetitiveAlertTypesIgnoresCustomAlertTypes() { + String customAlertTypeName = "CUSTOM_ALERT_TYPE"; + mockAllowedRepetitiveAlertTypesConfigKey(AlertManager.AlertType.ALERT_TYPE_CPU.getName() + "," + customAlertTypeName); + alertManagerImplMock.setupRepetitiveAlertTypes(); + List expectedTypes = (List)ReflectionTestUtils.getField(alertManagerImplMock, "allowedRepetitiveAlertTypeNames"); + Assert.assertNotNull(expectedTypes); + Assert.assertEquals(2, expectedTypes.size()); + Assert.assertTrue(expectedTypes.contains(AlertManager.AlertType.ALERT_TYPE_CPU.getName().toLowerCase())); + Assert.assertTrue(expectedTypes.contains(customAlertTypeName.toLowerCase())); + } + + @Test + public void setupRepetitiveAlertTypesHandlesNullConfigValue() { + mockAllowedRepetitiveAlertTypesConfigKey(null); + alertManagerImplMock.setupRepetitiveAlertTypes(); + List expectedTypes = (List)ReflectionTestUtils.getField(alertManagerImplMock, "allowedRepetitiveAlertTypeNames"); + Assert.assertNotNull(expectedTypes); + Assert.assertTrue(expectedTypes.isEmpty()); + } } From bac3421da437d0d3415e83fb3350a8584756e4a7 Mon Sep 17 00:00:00 2001 From: Dattu Date: Fri, 30 Jan 2026 12:12:41 +0530 Subject: [PATCH 111/126] Fixed: unnecessary regeneration of SSH keys in developer mode when they already existed. 
(#12059) --- .../src/main/java/com/cloud/server/ConfigurationServerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 51793f22e90..8f10dd84b54 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -614,7 +614,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } // FIXME: take a global database lock here for safety. boolean onWindows = isOnWindows(); - if(!onWindows) { + if (!onWindows && !(privkeyfile.exists() && pubkeyfile.exists())) { Script.runSimpleBashScript("if [ -f " + privkeyfile + " ]; then rm -f " + privkeyfile + "; fi; ssh-keygen -t ecdsa -m PEM -N '' -f " + privkeyfile + " -q 2>/dev/null || ssh-keygen -t ecdsa -N '' -f " + privkeyfile + " -q"); } From bb391c3deb1c4a4172f91a60898decac33854246 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Fri, 30 Jan 2026 07:50:44 +0100 Subject: [PATCH 112/126] test: increase timeout in test_human_readable_logs.py (#11972) --- test/integration/smoke/test_human_readable_logs.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/integration/smoke/test_human_readable_logs.py b/test/integration/smoke/test_human_readable_logs.py index fb972511f9c..c2478bfe576 100644 --- a/test/integration/smoke/test_human_readable_logs.py +++ b/test/integration/smoke/test_human_readable_logs.py @@ -48,9 +48,9 @@ class TestHumanReadableLogs(cloudstackTestCase): sshClient.execute(command) # CapacityChecker runs as soon as management server is up - # Check if "usedMem: (" is printed out within 60 seconds while server is starting - command = "timeout 60 tail -f /var/log/cloudstack/management/management-server.log | grep 'usedMem: ('" - sshClient.timeout = 60 + # Check if "usedMem: (" is printed out within 120 seconds while server is starting + command = "timeout 120 tail -f /var/log/cloudstack/management/management-server.log | grep 'usedMem: ('" + sshClient.timeout = 120 result = sshClient.runCommand(command) self.assertTrue(result['status'] == "FAILED") @@ -70,7 +70,7 @@ class TestHumanReadableLogs(cloudstackTestCase): sshClient.execute(command) # CapacityChecker runs as soon as management server is up - # Check if "usedMem: (" is printed out within 60 seconds while server is restarting + # Check if "usedMem: (" is printed out within 120 seconds while server is restarting command = "timeout 120 tail -f /var/log/cloudstack/management/management-server.log | grep 'usedMem: ('" sshClient.timeout = 120 result = sshClient.runCommand(command) From 45d623ec0d86ef040028b7cbc8339f7d0c7456f4 Mon Sep 17 00:00:00 2001 From: Vishesh <8760112+vishesh92@users.noreply.github.com> Date: Fri, 30 Jan 2026 12:21:47 +0530 Subject: [PATCH 113/126] Update search filter size dynamically (#12552) --- ui/src/components/view/SearchFilter.vue | 3 ++- ui/src/views/AutogenView.vue | 13 ++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/ui/src/components/view/SearchFilter.vue b/ui/src/components/view/SearchFilter.vue index ed950c094a3..9849ba8de44 100644 --- a/ui/src/components/view/SearchFilter.vue +++ b/ui/src/components/view/SearchFilter.vue @@ -24,7 +24,7 @@ v-for="filter in this.searchFilters" :key="filter.key + filter.value" > - + ({ ...filter })) + this.searchFilters = clonedFilters.map(f => ({ ...f })) const promises = [] for (let idx = 0; 
idx < clonedFilters.length; idx++) { const filter = clonedFilters[idx] diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index 6acc81d6a02..6c146875ece 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -17,7 +17,10 @@